diff --git a/LICENSE b/LICENSE new file mode 100755 index 00000000..d75f0ee8 --- /dev/null +++ b/LICENSE @@ -0,0 +1,58 @@ +Copyright (c) 2017, Jun-Yan Zhu and Taesung Park +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +--------------------------- LICENSE FOR pix2pix -------------------------------- +BSD License + +For pix2pix software +Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +----------------------------- LICENSE FOR DCGAN -------------------------------- +BSD License + +For dcgan.torch software + +Copyright (c) 2015, Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README.md b/README.md new file mode 100755 index 00000000..0b20c06f --- /dev/null +++ b/README.md @@ -0,0 +1,214 @@ + + +


+ +# CycleGAN and pix2pix in PyTorch + +We provide PyTorch implementations for both unpaired and paired image-to-image translation. + +The code was written by [Jun-Yan Zhu](https://github.com/junyanz) and [Taesung Park](https://github.com/taesung89), and supported by [Tongzhou Wang](https://ssnl.github.io/). + +This PyTorch implementation produces results comparable to or better than our original Torch software. If you would like to reproduce the same results as in the papers, check out the original [CycleGAN Torch](https://github.com/junyanz/CycleGAN) and [pix2pix Torch](https://github.com/phillipi/pix2pix) code + +**Note**: The current software works well with PyTorch 0.4+. Check out the older [branch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/tree/pytorch0.3.1) that supports PyTorch 0.1-0.3. + +You may find useful information in [training/test tips](docs/tips.md) and [frequently asked questions](docs/qa.md). + +**CycleGAN: [Project](https://junyanz.github.io/CycleGAN/) | [Paper](https://arxiv.org/pdf/1703.10593.pdf) | [Torch](https://github.com/junyanz/CycleGAN)** + + + +**Pix2pix: [Project](https://phillipi.github.io/pix2pix/) | [Paper](https://arxiv.org/pdf/1611.07004.pdf) | [Torch](https://github.com/phillipi/pix2pix)** + + + + +**[EdgesCats Demo](https://affinelayer.com/pixsrv/) | [pix2pix-tensorflow](https://github.com/affinelayer/pix2pix-tensorflow) | by [Christopher Hesse](https://twitter.com/christophrhesse)** + + + +If you use this code for your research, please cite: + +Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks +[Jun-Yan Zhu](https://people.eecs.berkeley.edu/~junyanz/)\*, [Taesung Park](https://taesung.me/)\*, [Phillip Isola](https://people.eecs.berkeley.edu/~isola/), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros) +In ICCV 2017. (* equal contributions) [[Bibtex]](https://junyanz.github.io/CycleGAN/CycleGAN.txt) + + +Image-to-Image Translation with Conditional Adversarial Networks +[Phillip Isola](https://people.eecs.berkeley.edu/~isola), [Jun-Yan Zhu](https://people.eecs.berkeley.edu/~junyanz), [Tinghui Zhou](https://people.eecs.berkeley.edu/~tinghuiz), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros) +In CVPR 2017. [[Bibtex]](http://people.csail.mit.edu/junyanz/projects/pix2pix/pix2pix.bib) + +## Course +CycleGAN course assignment [code](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-code.zip) and [handout](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-handout.pdf) designed by Prof. [Roger Grosse](http://www.cs.toronto.edu/~rgrosse/) for [CSC321](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/) "Intro to Neural Networks and Machine Learning" at University of Toronto. Please contact the instructor if you would like to adopt it in your course. + +## Other implementations +### CycleGAN +

[Tensorflow] (by Harry Yang), +[Tensorflow] (by Archit Rathore), +[Tensorflow] (by Van Huy), +[Tensorflow] (by Xiaowei Hu), + [Tensorflow-simple] (by Zhenliang He), + [TensorLayer] (by luoxier), +[Chainer] (by Yanghua Jin), +[Minimal PyTorch] (by yunjey), +[Mxnet] (by Ldpe2G), +[lasagne/keras] (by tjwei)

+ + +### pix2pix +

[Tensorflow] (by Christopher Hesse), +[Tensorflow] (by Eyyüb Sariu), + [Tensorflow (face2face)] (by Dat Tran), + [Tensorflow (film)] (by Arthur Juliani), +[Tensorflow (zi2zi)] (by Yuchen Tian), +[Chainer] (by mattya), +[tf/torch/keras/lasagne] (by tjwei), +[Pytorch] (by taey16) +

+ + +## Prerequisites +- Linux or macOS +- Python 2 or 3 +- CPU or NVIDIA GPU + CUDA CuDNN + +## Getting Started +### Installation + +- Clone this repo: +```bash +git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix +cd pytorch-CycleGAN-and-pix2pix +``` + +- Install PyTorch 0.4+ and torchvision from http://pytorch.org and other dependencies (e.g., [visdom](https://github.com/facebookresearch/visdom) and [dominate](https://github.com/Knio/dominate)). You can install all the dependencies by +```bash +pip install -r requirements.txt +``` + +- For Conda users, we include a script `./scripts/conda_deps.sh` to install PyTorch and other libraries. + +### CycleGAN train/test +- Download a CycleGAN dataset (e.g. maps): +```bash +bash ./datasets/download_cyclegan_dataset.sh maps +``` +- Train a model: +```bash +#!./scripts/train_cyclegan.sh +python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan +``` +- To view training results and loss plots, run `python -m visdom.server` and click the URL http://localhost:8097. To see more intermediate results, check out `./checkpoints/maps_cyclegan/web/index.html` +- Test the model: +```bash +#!./scripts/test_cyclegan.sh +python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan +``` +The test results will be saved to a html file here: `./results/maps_cyclegan/latest_test/index.html`. + +### pix2pix train/test +- Download a pix2pix dataset (e.g.facades): +```bash +bash ./datasets/download_pix2pix_dataset.sh facades +``` +- Train a model: +```bash +#!./scripts/train_pix2pix.sh +python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA +``` +- To view training results and loss plots, run `python -m visdom.server` and click the URL http://localhost:8097. To see more intermediate results, check out `./checkpoints/facades_pix2pix/web/index.html` +- Test the model (`bash ./scripts/test_pix2pix.sh`): +```bash +#!./scripts/test_pix2pix.sh +python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA +``` +The test results will be saved to a html file here: `./results/facades_pix2pix/test_latest/index.html`. + +You can find more scripts at `scripts` directory. + +### Apply a pre-trained model (CycleGAN) +- You can download a pretrained model (e.g. horse2zebra) with the following script: +```bash +bash ./scripts/download_cyclegan_model.sh horse2zebra +``` +The pretrained model is saved at `./checkpoints/{name}_pretrained/latest_net_G.pth`. Check [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_cyclegan_model.sh#L3) for all the available CycleGAN models. +- To test the model, you also need to download the horse2zebra dataset: +```bash +bash ./datasets/download_cyclegan_dataset.sh horse2zebra +``` + +- Then generate the results using +```bash +python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test +``` +The option `--model test` is used for generating results of CycleGAN only for one side. `python test.py --model cycle_gan` will require loading and generating results in both directions, which is sometimes unnecessary. The results will be saved at `./results/`. Use `--results_dir {directory_path_to_save_result}` to specify the results directory. + +- If you would like to apply a pre-trained model to a collection of input images (rather than image pairs), please use `--dataset_mode single` and `--model test` options. 
Here is a script to apply a model to Facade label maps (stored in the directory `facades/testB`). +``` bash +#!./scripts/test_single.sh +python test.py --dataroot ./datasets/facades/testB/ --name {your_trained_model_name} --model test +``` +You might want to specify `--netG` to match the generator architecture of the trained model. + +### Apply a pre-trained model (pix2pix) + +Download a pre-trained model with `./scripts/download_pix2pix_model.sh`. + +- Check [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_pix2pix_model.sh#L3) for all the available pix2pix models. For example, if you would like to download label2photo model on the Facades dataset, +```bash +bash ./scripts/download_pix2pix_model.sh facades_label2photo +``` +- Download the pix2pix facades datasets: +```bash +bash ./datasets/download_pix2pix_dataset.sh facades +``` +- Then generate the results using +```bash +python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained +``` +Note that we specified `--direction BtoA` as Facades dataset's A to B direction is photos to labels. + +- See a list of currently available models at `./scripts/download_pix2pix_model.sh` + +## [Datasets](docs/datasets.md) +Download pix2pix/CycleGAN datasets and create your own datasets. + +## [Training/Test Tips](docs/tips.md) +Best practice for training and testing your models. + +## [Frequently Asked Questions](docs/qa.md) +Before you post a new question, please first look at the above Q & A and existing GitHub issues. + + +## Citation +If you use this code for your research, please cite our papers. +``` +@inproceedings{CycleGAN2017, + title={Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networkss}, + author={Zhu, Jun-Yan and Park, Taesung and Isola, Phillip and Efros, Alexei A}, + booktitle={Computer Vision (ICCV), 2017 IEEE International Conference on}, + year={2017} +} + + +@inproceedings{isola2017image, + title={Image-to-Image Translation with Conditional Adversarial Networks}, + author={Isola, Phillip and Zhu, Jun-Yan and Zhou, Tinghui and Efros, Alexei A}, + booktitle={Computer Vision and Pattern Recognition (CVPR), 2017 IEEE Conference on}, + year={2017} +} +``` + + + +## Related Projects +**[CycleGAN-Torch](https://github.com/junyanz/CycleGAN) | +[pix2pix-Torch](https://github.com/phillipi/pix2pix) | [pix2pixHD](https://github.com/NVIDIA/pix2pixHD) | +[iGAN](https://github.com/junyanz/iGAN) | +[BicycleGAN](https://github.com/junyanz/BicycleGAN)** + +## Cat Paper Collection +If you love cats, and love reading cool graphics, vision, and learning papers, please check out the Cat Paper [Collection](https://github.com/junyanz/CatPapers). + +## Acknowledgments +Our code is inspired by [pytorch-DCGAN](https://github.com/pytorch/examples/tree/master/dcgan). diff --git a/data/__init__.py b/data/__init__.py new file mode 100755 index 00000000..8e695a7c --- /dev/null +++ b/data/__init__.py @@ -0,0 +1,75 @@ +import importlib +import torch.utils.data +from data.base_data_loader import BaseDataLoader +from data.base_dataset import BaseDataset + + +def find_dataset_using_name(dataset_name): + # Given the option --dataset_mode [datasetname], + # the file "data/datasetname_dataset.py" + # will be imported. + dataset_filename = "data." + dataset_name + "_dataset" + datasetlib = importlib.import_module(dataset_filename) + + # In the file, the class called DatasetNameDataset() will + # be instantiated. 
It has to be a subclass of BaseDataset, + # and it is case-insensitive. + dataset = None + target_dataset_name = dataset_name.replace('_', '') + 'dataset' + for name, cls in datasetlib.__dict__.items(): + if name.lower() == target_dataset_name.lower() \ + and issubclass(cls, BaseDataset): + dataset = cls + + if dataset is None: + print("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name)) + exit(0) + + return dataset + + +def get_option_setter(dataset_name): + dataset_class = find_dataset_using_name(dataset_name) + return dataset_class.modify_commandline_options + + +def create_dataset(opt): + dataset = find_dataset_using_name(opt.dataset_mode) + instance = dataset() + instance.initialize(opt) + print("dataset [%s] was created" % (instance.name())) + return instance + + +def CreateDataLoader(opt): + data_loader = CustomDatasetDataLoader() + data_loader.initialize(opt) + return data_loader + + +# Wrapper class of Dataset class that performs +# multi-threaded data loading +class CustomDatasetDataLoader(BaseDataLoader): + def name(self): + return 'CustomDatasetDataLoader' + + def initialize(self, opt): + BaseDataLoader.initialize(self, opt) + self.dataset = create_dataset(opt) + self.dataloader = torch.utils.data.DataLoader( + self.dataset, + batch_size=opt.batch_size, + shuffle=not opt.serial_batches, + num_workers=int(opt.num_threads)) + + def load_data(self): + return self + + def __len__(self): + return min(len(self.dataset), self.opt.max_dataset_size) + + def __iter__(self): + for i, data in enumerate(self.dataloader): + if i * self.opt.batch_size >= self.opt.max_dataset_size: + break + yield data diff --git a/data/__init__.pyc b/data/__init__.pyc new file mode 100755 index 00000000..15d7de5d Binary files /dev/null and b/data/__init__.pyc differ diff --git a/data/aligned_dataset.py b/data/aligned_dataset.py new file mode 100755 index 00000000..9f460364 --- /dev/null +++ b/data/aligned_dataset.py @@ -0,0 +1,69 @@ +import os.path +import random +import torchvision.transforms as transforms +import torch +from data.base_dataset import BaseDataset +from data.image_folder import make_dataset +from PIL import Image + + +class AlignedDataset(BaseDataset): + @staticmethod + def modify_commandline_options(parser, is_train): + return parser + + def initialize(self, opt): + self.opt = opt + self.root = opt.dataroot + self.dir_AB = os.path.join(opt.dataroot, opt.phase) + self.AB_paths = sorted(make_dataset(self.dir_AB)) + assert(opt.resize_or_crop == 'resize_and_crop') + + def __getitem__(self, index): + AB_path = self.AB_paths[index] + AB = Image.open(AB_path).convert('RGB') + w, h = AB.size + assert(self.opt.loadSize >= self.opt.fineSize) + w2 = int(w / 2) + A = AB.crop((0, 0, w2, h)).resize((self.opt.loadSize, self.opt.loadSize), Image.BICUBIC) + B = AB.crop((w2, 0, w, h)).resize((self.opt.loadSize, self.opt.loadSize), Image.BICUBIC) + A = transforms.ToTensor()(A) + B = transforms.ToTensor()(B) + w_offset = random.randint(0, max(0, self.opt.loadSize - self.opt.fineSize - 1)) + h_offset = random.randint(0, max(0, self.opt.loadSize - self.opt.fineSize - 1)) + + A = A[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize] + B = B[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize] + + A = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(A) + B = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(B) + + if self.opt.direction == 'BtoA': + input_nc = 
self.opt.output_nc + output_nc = self.opt.input_nc + else: + input_nc = self.opt.input_nc + output_nc = self.opt.output_nc + + if (not self.opt.no_flip) and random.random() < 0.5: + idx = [i for i in range(A.size(2) - 1, -1, -1)] + idx = torch.LongTensor(idx) + A = A.index_select(2, idx) + B = B.index_select(2, idx) + + if input_nc == 1: # RGB to gray + tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114 + A = tmp.unsqueeze(0) + + if output_nc == 1: # RGB to gray + tmp = B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] * 0.114 + B = tmp.unsqueeze(0) + + return {'A': A, 'B': B, + 'A_paths': AB_path, 'B_paths': AB_path} + + def __len__(self): + return len(self.AB_paths) + + def name(self): + return 'AlignedDataset' diff --git a/data/base_data_loader.py b/data/base_data_loader.py new file mode 100755 index 00000000..ae5a1689 --- /dev/null +++ b/data/base_data_loader.py @@ -0,0 +1,10 @@ +class BaseDataLoader(): + def __init__(self): + pass + + def initialize(self, opt): + self.opt = opt + pass + + def load_data(): + return None diff --git a/data/base_data_loader.pyc b/data/base_data_loader.pyc new file mode 100755 index 00000000..ba42f466 Binary files /dev/null and b/data/base_data_loader.pyc differ diff --git a/data/base_dataset.py b/data/base_dataset.py new file mode 100755 index 00000000..e8b5e9ba --- /dev/null +++ b/data/base_dataset.py @@ -0,0 +1,102 @@ +import torch.utils.data as data +from PIL import Image +import torchvision.transforms as transforms + + +class BaseDataset(data.Dataset): + def __init__(self): + super(BaseDataset, self).__init__() + + def name(self): + return 'BaseDataset' + + @staticmethod + def modify_commandline_options(parser, is_train): + return parser + + def initialize(self, opt): + pass + + def __len__(self): + return 0 + + +def get_transform(opt): + transform_list = [] + if opt.resize_or_crop == 'resize_and_crop': + osize = [opt.loadSize, opt.loadSize] + transform_list.append(transforms.Resize(osize, Image.BICUBIC)) + transform_list.append(transforms.RandomCrop(opt.fineSize)) + elif opt.resize_or_crop == 'crop': + transform_list.append(transforms.RandomCrop(opt.fineSize)) + elif opt.resize_or_crop == 'scale_width': + transform_list.append(transforms.Lambda( + lambda img: __scale_width(img, opt.fineSize))) + elif opt.resize_or_crop == 'scale_width_and_crop': + transform_list.append(transforms.Lambda( + lambda img: __scale_width(img, opt.loadSize))) + transform_list.append(transforms.RandomCrop(opt.fineSize)) + elif opt.resize_or_crop == 'none': + transform_list.append(transforms.Lambda( + lambda img: __adjust(img))) + else: + raise ValueError('--resize_or_crop %s is not a valid option.' 
% opt.resize_or_crop) + + if opt.isTrain and not opt.no_flip: + transform_list.append(transforms.RandomHorizontalFlip()) + + transform_list += [transforms.ToTensor(), + transforms.Normalize((0.5, 0.5, 0.5), + (0.5, 0.5, 0.5))] + return transforms.Compose(transform_list) + + +# just modify the width and height to be multiple of 4 +def __adjust(img): + ow, oh = img.size + + # the size needs to be a multiple of this number, + # because going through generator network may change img size + # and eventually cause size mismatch error + mult = 4 + if ow % mult == 0 and oh % mult == 0: + return img + w = (ow - 1) // mult + w = (w + 1) * mult + h = (oh - 1) // mult + h = (h + 1) * mult + + if ow != w or oh != h: + __print_size_warning(ow, oh, w, h) + + return img.resize((w, h), Image.BICUBIC) + + +def __scale_width(img, target_width): + ow, oh = img.size + + # the size needs to be a multiple of this number, + # because going through generator network may change img size + # and eventually cause size mismatch error + mult = 4 + assert target_width % mult == 0, "the target width needs to be multiple of %d." % mult + if (ow == target_width and oh % mult == 0): + return img + w = target_width + target_height = int(target_width * oh / ow) + m = (target_height - 1) // mult + h = (m + 1) * mult + + if target_height != h: + __print_size_warning(target_width, target_height, w, h) + + return img.resize((w, h), Image.BICUBIC) + + +def __print_size_warning(ow, oh, w, h): + if not hasattr(__print_size_warning, 'has_printed'): + print("The image size needs to be a multiple of 4. " + "The loaded image size was (%d, %d), so it was adjusted to " + "(%d, %d). This adjustment will be done to all images " + "whose sizes are not multiples of 4" % (ow, oh, w, h)) + __print_size_warning.has_printed = True diff --git a/data/base_dataset.pyc b/data/base_dataset.pyc new file mode 100755 index 00000000..b83ebe95 Binary files /dev/null and b/data/base_dataset.pyc differ diff --git a/data/image_folder.py b/data/image_folder.py new file mode 100755 index 00000000..898200b2 --- /dev/null +++ b/data/image_folder.py @@ -0,0 +1,68 @@ +############################################################################### +# Code from +# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py +# Modified the original code so that it also loads images from the current +# directory as well as the subdirectories +############################################################################### + +import torch.utils.data as data + +from PIL import Image +import os +import os.path + +IMG_EXTENSIONS = [ + '.jpg', '.JPG', '.jpeg', '.JPEG', + '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', +] + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) + + +def make_dataset(dir): + images = [] + assert os.path.isdir(dir), '%s is not a valid directory' % dir + + for root, _, fnames in sorted(os.walk(dir)): + for fname in fnames: + if is_image_file(fname): + path = os.path.join(root, fname) + images.append(path) + + return images + + +def default_loader(path): + return Image.open(path).convert('RGB') + + +class ImageFolder(data.Dataset): + + def __init__(self, root, transform=None, return_paths=False, + loader=default_loader): + imgs = make_dataset(root) + if len(imgs) == 0: + raise(RuntimeError("Found 0 images in: " + root + "\n" + "Supported image extensions are: " + + ",".join(IMG_EXTENSIONS))) + + self.root = root + self.imgs = imgs + self.transform = transform + self.return_paths 
= return_paths + self.loader = loader + + def __getitem__(self, index): + path = self.imgs[index] + img = self.loader(path) + if self.transform is not None: + img = self.transform(img) + if self.return_paths: + return img, path + else: + return img + + def __len__(self): + return len(self.imgs) diff --git a/data/image_folder.pyc b/data/image_folder.pyc new file mode 100755 index 00000000..1db66574 Binary files /dev/null and b/data/image_folder.pyc differ diff --git a/data/single_dataset.py b/data/single_dataset.py new file mode 100755 index 00000000..c8b76550 --- /dev/null +++ b/data/single_dataset.py @@ -0,0 +1,42 @@ +import os.path +from data.base_dataset import BaseDataset, get_transform +from data.image_folder import make_dataset +from PIL import Image + + +class SingleDataset(BaseDataset): + @staticmethod + def modify_commandline_options(parser, is_train): + return parser + + def initialize(self, opt): + self.opt = opt + self.root = opt.dataroot + self.dir_A = os.path.join(opt.dataroot) + + self.A_paths = make_dataset(self.dir_A) + + self.A_paths = sorted(self.A_paths) + + self.transform = get_transform(opt) + + def __getitem__(self, index): + A_path = self.A_paths[index] + A_img = Image.open(A_path).convert('RGB') + A = self.transform(A_img) + if self.opt.direction == 'BtoA': + input_nc = self.opt.output_nc + else: + input_nc = self.opt.input_nc + + if input_nc == 1: # RGB to gray + tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114 + A = tmp.unsqueeze(0) + + return {'A': A, 'A_paths': A_path} + + def __len__(self): + return len(self.A_paths) + + def name(self): + return 'SingleImageDataset' diff --git a/data/unaligned_dataset.py b/data/unaligned_dataset.py new file mode 100755 index 00000000..de2eec2c --- /dev/null +++ b/data/unaligned_dataset.py @@ -0,0 +1,61 @@ +import os.path +from data.base_dataset import BaseDataset, get_transform +from data.image_folder import make_dataset +from PIL import Image +import random + + +class UnalignedDataset(BaseDataset): + @staticmethod + def modify_commandline_options(parser, is_train): + return parser + + def initialize(self, opt): + self.opt = opt + self.root = opt.dataroot + self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') + self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') + + self.A_paths = make_dataset(self.dir_A) + self.B_paths = make_dataset(self.dir_B) + + self.A_paths = sorted(self.A_paths) + self.B_paths = sorted(self.B_paths) + self.A_size = len(self.A_paths) + self.B_size = len(self.B_paths) + self.transform = get_transform(opt) + + def __getitem__(self, index): + A_path = self.A_paths[index % self.A_size] + if self.opt.serial_batches: + index_B = index % self.B_size + else: + index_B = random.randint(0, self.B_size - 1) + B_path = self.B_paths[index_B] + A_img = Image.open(A_path).convert('RGB') + B_img = Image.open(B_path).convert('RGB') + + A = self.transform(A_img) + B = self.transform(B_img) + if self.opt.direction == 'BtoA': + input_nc = self.opt.output_nc + output_nc = self.opt.input_nc + else: + input_nc = self.opt.input_nc + output_nc = self.opt.output_nc + + if input_nc == 1: # RGB to gray + tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114 + A = tmp.unsqueeze(0) + + if output_nc == 1: # RGB to gray + tmp = B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] 
* 0.114 + B = tmp.unsqueeze(0) + return {'A': A, 'B': B, + 'A_paths': A_path, 'B_paths': B_path} + + def __len__(self): + return max(self.A_size, self.B_size) + + def name(self): + return 'UnalignedDataset' diff --git a/data/unaligned_dataset.pyc b/data/unaligned_dataset.pyc new file mode 100755 index 00000000..28558741 Binary files /dev/null and b/data/unaligned_dataset.pyc differ diff --git a/datasets/bibtex/cityscapes.tex b/datasets/bibtex/cityscapes.tex new file mode 100755 index 00000000..a87bdbf5 --- /dev/null +++ b/datasets/bibtex/cityscapes.tex @@ -0,0 +1,6 @@ +@inproceedings{Cordts2016Cityscapes, +title={The Cityscapes Dataset for Semantic Urban Scene Understanding}, +author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt}, +booktitle={Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, +year={2016} +} diff --git a/datasets/bibtex/facades.tex b/datasets/bibtex/facades.tex new file mode 100755 index 00000000..08b773e1 --- /dev/null +++ b/datasets/bibtex/facades.tex @@ -0,0 +1,7 @@ +@INPROCEEDINGS{Tylecek13, + author = {Radim Tyle{\v c}ek, Radim {\v S}{\' a}ra}, + title = {Spatial Pattern Templates for Recognition of Objects with Regular Structure}, + booktitle = {Proc. GCPR}, + year = {2013}, + address = {Saarbrucken, Germany}, +} diff --git a/datasets/bibtex/handbags.tex b/datasets/bibtex/handbags.tex new file mode 100755 index 00000000..b79710c7 --- /dev/null +++ b/datasets/bibtex/handbags.tex @@ -0,0 +1,13 @@ +@inproceedings{zhu2016generative, + title={Generative Visual Manipulation on the Natural Image Manifold}, + author={Zhu, Jun-Yan and Kr{\"a}henb{\"u}hl, Philipp and Shechtman, Eli and Efros, Alexei A.}, + booktitle={Proceedings of European Conference on Computer Vision (ECCV)}, + year={2016} +} + +@InProceedings{xie15hed, + author = {"Xie, Saining and Tu, Zhuowen"}, + Title = {Holistically-Nested Edge Detection}, + Booktitle = "Proceedings of IEEE International Conference on Computer Vision", + Year = {2015}, +} diff --git a/datasets/bibtex/shoes.tex b/datasets/bibtex/shoes.tex new file mode 100755 index 00000000..e67e158b --- /dev/null +++ b/datasets/bibtex/shoes.tex @@ -0,0 +1,14 @@ +@InProceedings{fine-grained, + author = {A. Yu and K. 
Grauman}, + title = {{F}ine-{G}rained {V}isual {C}omparisons with {L}ocal {L}earning}, + booktitle = {Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2014} +} + +@InProceedings{xie15hed, + author = {"Xie, Saining and Tu, Zhuowen"}, + Title = {Holistically-Nested Edge Detection}, + Booktitle = "Proceedings of IEEE International Conference on Computer Vision", + Year = {2015}, +} diff --git a/datasets/bibtex/transattr.tex b/datasets/bibtex/transattr.tex new file mode 100755 index 00000000..05858499 --- /dev/null +++ b/datasets/bibtex/transattr.tex @@ -0,0 +1,8 @@ +@article {Laffont14, + title = {Transient Attributes for High-Level Understanding and Editing of Outdoor Scenes}, + author = {Pierre-Yves Laffont and Zhile Ren and Xiaofeng Tao and Chao Qian and James Hays}, + journal = {ACM Transactions on Graphics (proceedings of SIGGRAPH)}, + volume = {33}, + number = {4}, + year = {2014} +} diff --git a/datasets/combine_A_and_B.py b/datasets/combine_A_and_B.py new file mode 100755 index 00000000..c69dc567 --- /dev/null +++ b/datasets/combine_A_and_B.py @@ -0,0 +1,48 @@ +import os +import numpy as np +import cv2 +import argparse + +parser = argparse.ArgumentParser('create image pairs') +parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='../dataset/50kshoes_edges') +parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='../dataset/50kshoes_jpg') +parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='../dataset/test_AB') +parser.add_argument('--num_imgs', dest='num_imgs', help='number of images', type=int, default=1000000) +parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)', action='store_true') +args = parser.parse_args() + +for arg in vars(args): + print('[%s] = ' % arg, getattr(args, arg)) + +splits = os.listdir(args.fold_A) + +for sp in splits: + img_fold_A = os.path.join(args.fold_A, sp) + img_fold_B = os.path.join(args.fold_B, sp) + img_list = os.listdir(img_fold_A) + if args.use_AB: + img_list = [img_path for img_path in img_list if '_A.' 
in img_path] + + num_imgs = min(args.num_imgs, len(img_list)) + print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list))) + img_fold_AB = os.path.join(args.fold_AB, sp) + if not os.path.isdir(img_fold_AB): + os.makedirs(img_fold_AB) + print('split = %s, number of images = %d' % (sp, num_imgs)) + for n in range(num_imgs): + name_A = img_list[n] + path_A = os.path.join(img_fold_A, name_A) + if args.use_AB: + name_B = name_A.replace('_A.', '_B.') + else: + name_B = name_A + path_B = os.path.join(img_fold_B, name_B) + if os.path.isfile(path_A) and os.path.isfile(path_B): + name_AB = name_A + if args.use_AB: + name_AB = name_AB.replace('_A.', '.') # remove _A + path_AB = os.path.join(img_fold_AB, name_AB) + im_A = cv2.imread(path_A, cv2.CV_LOAD_IMAGE_COLOR) + im_B = cv2.imread(path_B, cv2.CV_LOAD_IMAGE_COLOR) + im_AB = np.concatenate([im_A, im_B], 1) + cv2.imwrite(path_AB, im_AB) diff --git a/datasets/download_cyclegan_dataset.sh b/datasets/download_cyclegan_dataset.sh new file mode 100755 index 00000000..bfa64141 --- /dev/null +++ b/datasets/download_cyclegan_dataset.sh @@ -0,0 +1,15 @@ +FILE=$1 + +if [[ $FILE != "ae_photos" && $FILE != "apple2orange" && $FILE != "summer2winter_yosemite" && $FILE != "horse2zebra" && $FILE != "monet2photo" && $FILE != "cezanne2photo" && $FILE != "ukiyoe2photo" && $FILE != "vangogh2photo" && $FILE != "maps" && $FILE != "cityscapes" && $FILE != "facades" && $FILE != "iphone2dslr_flower" && $FILE != "ae_photos" && $FILE != "mini" && $FILE != "mini_pix2pix" ]]; then + echo "Available datasets are: apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, cezanne2photo, ukiyoe2photo, vangogh2photo, maps, cityscapes, facades, iphone2dslr_flower, ae_photos" + exit 1 +fi + +echo "Specified [$FILE]" +URL=https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/$FILE.zip +ZIP_FILE=./datasets/$FILE.zip +TARGET_DIR=./datasets/$FILE/ +wget -N $URL -O $ZIP_FILE +mkdir $TARGET_DIR +unzip $ZIP_FILE -d ./datasets/ +rm $ZIP_FILE diff --git a/datasets/download_pix2pix_dataset.sh b/datasets/download_pix2pix_dataset.sh new file mode 100755 index 00000000..e4987227 --- /dev/null +++ b/datasets/download_pix2pix_dataset.sh @@ -0,0 +1,16 @@ +FILE=$1 + +if [[ $FILE != "cityscapes" && $FILE != "night2day" && $FILE != "edges2handbags" && $FILE != "edges2shoes" && $FILE != "facades" && $FILE != "maps" ]]; then + echo "Available datasets are cityscapes, night2day, edges2handbags, edges2shoes, facades, maps" + exit 1 +fi + +echo "Specified [$FILE]" + +URL=http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/$FILE.tar.gz +TAR_FILE=./datasets/$FILE.tar.gz +TARGET_DIR=./datasets/$FILE/ +wget -N $URL -O $TAR_FILE +mkdir -p $TARGET_DIR +tar -zxvf $TAR_FILE -C ./datasets/ +rm $TAR_FILE diff --git a/datasets/make_dataset_aligned.py b/datasets/make_dataset_aligned.py new file mode 100755 index 00000000..739c7679 --- /dev/null +++ b/datasets/make_dataset_aligned.py @@ -0,0 +1,63 @@ +import os + +from PIL import Image + + +def get_file_paths(folder): + image_file_paths = [] + for root, dirs, filenames in os.walk(folder): + filenames = sorted(filenames) + for filename in filenames: + input_path = os.path.abspath(root) + file_path = os.path.join(input_path, filename) + if filename.endswith('.png') or filename.endswith('.jpg'): + image_file_paths.append(file_path) + + break # prevent descending into subfolders + return image_file_paths + + +def align_images(a_file_paths, b_file_paths, target_path): + if not os.path.exists(target_path): + os.makedirs(target_path) + + for i in 
range(len(a_file_paths)): + img_a = Image.open(a_file_paths[i]) + img_b = Image.open(b_file_paths[i]) + assert(img_a.size == img_b.size) + + aligned_image = Image.new("RGB", (img_a.size[0] * 2, img_a.size[1])) + aligned_image.paste(img_a, (0, 0)) + aligned_image.paste(img_b, (img_a.size[0], 0)) + aligned_image.save(os.path.join(target_path, '{:04d}.jpg'.format(i))) + + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + parser.add_argument( + '--dataset-path', + dest='dataset_path', + help='Which folder to process (it should have subfolders testA, testB, trainA and trainB' + ) + args = parser.parse_args() + + dataset_folder = args.dataset_path + print(dataset_folder) + + test_a_path = os.path.join(dataset_folder, 'testA') + test_b_path = os.path.join(dataset_folder, 'testB') + test_a_file_paths = get_file_paths(test_a_path) + test_b_file_paths = get_file_paths(test_b_path) + assert(len(test_a_file_paths) == len(test_b_file_paths)) + test_path = os.path.join(dataset_folder, 'test') + + train_a_path = os.path.join(dataset_folder, 'trainA') + train_b_path = os.path.join(dataset_folder, 'trainB') + train_a_file_paths = get_file_paths(train_a_path) + train_b_file_paths = get_file_paths(train_b_path) + assert(len(train_a_file_paths) == len(train_b_file_paths)) + train_path = os.path.join(dataset_folder, 'train') + + align_images(test_a_file_paths, test_b_file_paths, test_path) + align_images(train_a_file_paths, train_b_file_paths, train_path) diff --git a/docs/datasets.md b/docs/datasets.md new file mode 100755 index 00000000..42e88a40 --- /dev/null +++ b/docs/datasets.md @@ -0,0 +1,44 @@ + + +### CycleGAN Datasets +Download the CycleGAN datasets using the following script. Some of the datasets are collected by other researchers. Please cite their papers if you use the data. +```bash +bash ./datasets/download_cyclegan_dataset.sh dataset_name +``` +- `facades`: 400 images from the [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](datasets/bibtex/facades.tex)] +- `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](datasets/bibtex/cityscapes.tex)] +- `maps`: 1096 training images scraped from Google Maps. +- `horse2zebra`: 939 horse images and 1177 zebra images downloaded from [ImageNet](http://www.image-net.org) using keywords `wild horse` and `zebra` +- `apple2orange`: 996 apple images and 1020 orange images downloaded from [ImageNet](http://www.image-net.org) using keywords `apple` and `navel orange`. +- `summer2winter_yosemite`: 1273 summer Yosemite images and 854 winter Yosemite images were downloaded using Flickr API. See more details in our paper. +- `monet2photo`, `vangogh2photo`, `ukiyoe2photo`, `cezanne2photo`: The art images were downloaded from [Wikiart](https://www.wikiart.org/). The real photos are downloaded from Flickr using the combination of the tags *landscape* and *landscapephotography*. The training set size of each class is Monet:1074, Cezanne:584, Van Gogh:401, Ukiyo-e:1433, Photographs:6853. +- `iphone2dslr_flower`: both classes of images were downlaoded from Flickr. The training set size of each class is iPhone:1813, DSLR:3316. See more details in our paper. + +To train a model on your own datasets, you need to create a data folder with two subdirectories `trainA` and `trainB` that contain images from domain A and B. You can test your model on your training set by setting `--phase train` in `test.py`. 
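For concreteness, here is a minimal sketch of setting up that folder layout; the dataset name `mydata` is only a placeholder, and the snippet simply mirrors how `data/unaligned_dataset.py` looks for images under `<dataroot>/<phase>A` and `<dataroot>/<phase>B`:
```python
import os

# Hypothetical dataset name used only for illustration.
dataroot = './datasets/mydata'
for domain in ('A', 'B'):
    # data/unaligned_dataset.py reads <dataroot>/trainA and <dataroot>/trainB.
    os.makedirs(os.path.join(dataroot, 'train' + domain), exist_ok=True)
# Copy your domain-A images into trainA and your domain-B images into trainB, then run:
#   python train.py --dataroot ./datasets/mydata --name mydata_cyclegan --model cycle_gan
```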
You can also create subdirectories `testA` and `testB` if you have test data. + +You should **not** expect our method to work on just any random combination of input and output datasets (e.g. `cats<->keyboards`). From our experiments, we find it works better if two datasets share similar visual content. For example, `landscape painting<->landscape photographs` works much better than `portrait painting <-> landscape photographs`. `zebras<->horses` achieves compelling results while `cats<->dogs` completely fails. + +### pix2pix datasets +Download the pix2pix datasets using the following script. Some of the datasets are collected by other researchers. Please cite their papers if you use the data. +```bash +bash ./datasets/download_pix2pix_dataset.sh dataset_name +``` +- `facades`: 400 images from [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](datasets/bibtex/facades.tex)] +- `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](datasets/bibtex/cityscapes.tex)] +- `maps`: 1096 training images scraped from Google Maps +- `edges2shoes`: 50k training images from [UT Zappos50K dataset](http://vision.cs.utexas.edu/projects/finegrained/utzap50k). Edges are computed by [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](datasets/bibtex/shoes.tex)] +- `edges2handbags`: 137K Amazon Handbag images from [iGAN project](https://github.com/junyanz/iGAN). Edges are computed by [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](datasets/bibtex/handbags.tex)] +- `night2day`: around 20K natural scene images from [Transient Attributes dataset](http://transattr.cs.brown.edu/) [[Citation](datasets/bibtex/transattr.tex)]. To train a `day2night` pix2pix model, you need to add `--direction BtoA`. + +We provide a python script to generate pix2pix training data in the form of pairs of images {A,B}, where A and B are two different depictions of the same underlying scene. For example, these might be pairs {label map, photo} or {bw image, color image}. Then we can learn to translate A to B or B to A: + +Create folder `/path/to/data` with subfolders `A` and `B`. `A` and `B` should each have their own subfolders `train`, `val`, `test`, etc. In `/path/to/data/A/train`, put training images in style A. In `/path/to/data/B/train`, put the corresponding images in style B. Repeat same for other data splits (`val`, `test`, etc). + +Corresponding images in a pair {A,B} must be the same size and have the same filename, e.g., `/path/to/data/A/train/1.jpg` is considered to correspond to `/path/to/data/B/train/1.jpg`. + +Once the data is formatted this way, call: +```bash +python datasets/combine_A_and_B.py --fold_A /path/to/data/A --fold_B /path/to/data/B --fold_AB /path/to/data +``` + +This will combine each pair of images (A,B) into a single image file, ready for training. diff --git a/docs/qa.md b/docs/qa.md new file mode 100755 index 00000000..6501ccb9 --- /dev/null +++ b/docs/qa.md @@ -0,0 +1,107 @@ +## Frequently Asked Questions +Before you post a new question, please first look at the following Q & A and existing GitHub issues. You may also want to read [Training/Test tips](docs/tips.md) for more suggestions. 
+ +#### Connection Error: HTTPConnectionPool ([#230](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/230), [#24](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/24), [#38](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/38)) +Similar error messages include “Failed to establish a new connection/Connection refused”. + +Please start the visdom server before starting the training: +```bash +python -m visdom.server +``` +To install visdom, you can use the following command: +```bash +pip install visdom +``` +You can also disable visdom by setting `--display_id 0`. + +#### My PyTorch errors on CUDA-related code. +Try to run the following code snippet to make sure that CUDA is working (assuming you are using PyTorch >= 0.4): +```python +import torch +torch.cuda.init() +print(torch.randn(1, device='cuda')) +``` + +If you get an error, it is likely that your PyTorch build does not work with CUDA, e.g., it was installed from the official macOS binary, or you have a GPU that is too old and no longer supported. You may run the code on the CPU using `--gpu_ids -1`. + +#### TypeError: Object of type 'Tensor' is not JSON serializable ([#258](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/258)) +Similar errors: AttributeError: module 'torch' has no attribute 'device' ([#314](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/314)) + +The current code only works with PyTorch 0.4+. An earlier PyTorch version can often cause the above errors. + +#### ValueError: empty range for randrange() ([#390](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/390), [#376](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/376), [#194](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/194)) +Similar error messages include "ConnectionRefusedError: [Errno 111] Connection refused". + +This is related to the data augmentation step. It often happens when you use `--resize_or_crop crop`. The program will crop random `fineSize x fineSize` patches out of the input training images. But if some of your image sizes (e.g., `256x384`) are smaller than the `fineSize` (e.g., 512), you will get this error. A simple fix is to use other data augmentation methods such as `--resize_or_crop resize_and_crop` or `--resize_or_crop scale_width_and_crop`. Our program will automatically resize the images according to `loadSize` before applying the `fineSize x fineSize` cropping. Make sure that `loadSize >= fineSize`. + + +#### Can I continue/resume my training? ([#350](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/350), [#275](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/275), [#234](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/234), [#87](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/87)) +You can use the option `--continue_train`. Also set `--epoch_count` to specify a different starting epoch count. See more discussion in [training/test tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md#trainingtest-tips). + +#### Why does my training loss not converge? ([#335](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/335), [#164](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/164), [#30](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/30)) +Many GAN losses do not converge (exceptions: WGAN, WGAN-GP, etc.) due to the nature of minimax optimization.
For DCGAN and LSGAN objective, it is quite normal for the G and D losses to go up and down. It should be fine as long as they do not blow up. + +#### How can I make it work for my own data (e.g., 16-bit png, tiff, hyperspectral images)? ([#309](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/309), [#320](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/), [#202](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/202)) +The current code only supports RGB and grayscale images. If you would like to train the model on other data types, please follow the following steps: + +- change the parameters `--input_nc` and `--output_nc` to the number of channels in your input/output images. +- Write your own custom data loader (It is easy as long as you know how to load your data with python). If you write a new data loader class, you need to change the flag `--dataset_mode` accordingly. Alternatively, you can modify the existing data loader. For aligned datasets, change this [line](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/data/aligned_dataset.py#L24); For unaligned datasets, change these two [lines](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/data/unaligned_dataset.py#L36). + +- If you use visdom and HTML to visualize the results, you may also need to change the visualization code. + +#### Multi-GPU Training ([#327](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/327), [#292](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/292), [#137](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/137), [#35](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/35)) +You can use Multi-GPU training by setting `--gpu_ids` (e.g., `--gpu_ids 0,1,2,3` for the first four GPUs on your machine.) To fully utilize all the GPUs, you need to increase your batch size. Try `--batch_size 4`, `--batch_size 16`, or even a larger batch_size. Each GPU will process batch_size/#GPUs images. The optimal batch size depends on the number of GPUs you have, GPU memory per GPU, and the resolution of your training images. + +We also recommend that you use the instance normalization for multi-GPU training by setting `--norm instance`. The current batch normalization might not work for multi-GPUs as the batchnorm parameters are not shared across different GPUs. Advanced users can try [synchronized batchnorm](https://github.com/vacancy/Synchronized-BatchNorm-PyTorch). + + +#### Can I run the model on CPU? ([#310](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/310)) +Yes, you can set `--gpu_ids -1`. See [training/test tips](docs/tips.md) for more details. + + +#### Are pre-trained models available? ([#10](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/10)) +Yes, you can download pretrained models with the bash script `./scripts/download_cyclegan_model.sh`. See [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix#apply-a-pre-trained-model-cyclegan) for more details. We are slowly adding more models to the repo. + +#### Out of memory ([#174](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/174)) +CycleGAN is more memory-intensive than pix2pix as it requires two generators and two discriminators. If you would like to produce high-resolution images, you can do the following. + +- During training, train CycleGAN on cropped images of the training set. 
Please be careful not to change the aspect ratio or the scale of the original image, as this can lead to the training/test gap. You can usually do this by using `--resize_or_crop crop` option, or `--resize_or_crop scale_width_and_crop`. + +- Then at test time, you can load only one generator to produce the results in a single direction. This greatly saves GPU memory as you are not loading the discriminators and the other generator in the opposite direction. You can probably take the whole image as input. You can do this using `--model test --dataroot [path to the directory that contains your test images (e.g., ./datasets/horse2zebra/trainA)] --model_suffix _A --resize_or_crop none`. You can use either `--resize_or_crop none` or `--resize_or_crop scale_width --fineSize [your_desired_image_width]`. Please see the [model_suffix](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/test_model.py#L16) and [resize_or_crop](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/data/base_dataset.py#L24) for more details. + +#### What is the identity loss? ([#322](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/322), [#373](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/373), [#362](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/pull/362)) +We use the identity loss for our photo to painting application. The identity loss can regularize the generator to be close to an identity mapping when fed with real samples from the *target* domain. If something already looks like from the target domain, you should preserve the image without making additional changes. The generator trained with this loss will often be more conservative for unknown content. Please see more details in Sec 5.2 ''Photo generation from paintings'' and Figure 12 in the CycleGAN [paper](https://arxiv.org/pdf/1703.10593.pdf). The loss was first proposed in the Equation 6 of the prior work [[Taigman et al., 2017]](https://arxiv.org/pdf/1611.02200.pdf). + +#### The color gets inverted from the beginning of training ([#249](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/249)) +The authors also observe that the generator unnecessarily inverts the color of the input image early in training, and then never learns to undo the inversion. In this case, you can try two things. + +- First, try using identity loss `--identity 1.0` or `--identity 0.1`. We observe that the identity loss makes the generator to be more conservative and make fewer unnecessary changes. However, because of this, the change may not be as dramatic. + +- Second, try smaller variance when initializing weights by changing `--init_gain`. We observe that smaller variance in weight initialization results in less color inversion. + +#### For labels2photo Cityscapes evaluation, why does the pretrained FCN-8s model not work well on the original Cityscapes input images? ([#150](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/150)) +The model was trained on 256x256 images that are resized/upsampled to 1024x2048, so expected input images to the network are very blurry. The purpose of the resizing was to 1) keep the label maps in the original high resolution untouched and 2) avoid the need of changing the standard FCN training code for Cityscapes. + +#### How do I get the `ground-truth` numbers on the labels2photo Cityscapes evaluation? 
([#150](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/150)) +You need to resize the original Cityscapes images to 256x256 before running the evaluation code. + + +#### Using resize-conv to reduce checkerboard artifacts ([#190](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/190), [#64](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/64)) +This Distill [blog](https://distill.pub/2016/deconv-checkerboard/) discusses one of the potential causes of checkerboard artifacts. You can fix that issue by switching from "deconvolution" to nearest-neighbor upsampling followed by regular convolution. Here is one implementation provided by [@SsnL](https://github.com/SsnL). You can replace `ConvTranspose2d` with the following layers. +```python +nn.Upsample(scale_factor = 2, mode='bilinear'), +nn.ReflectionPad2d(1), +nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0), +``` +We have also noticed that sometimes the checkerboard artifacts will go away if you train long enough. Maybe you can try training your model a bit longer. + +#### pix2pix/CycleGAN has no random noise z ([#152](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/152)) +The current pix2pix/CycleGAN model does not take z as input. In both pix2pix and CycleGAN, we tried to add z to the generator: e.g., adding z to a latent state, concatenating with a latent state, applying dropout, etc., but often found that the output did not vary significantly as a function of z. Conditional GANs do not need noise as long as the input is sufficiently complex, so that the input can effectively play the role of noise. Without noise, the mapping is deterministic. + +Please check out the following papers that show ways of getting z to actually have a substantial effect: e.g., [BicycleGAN](https://github.com/junyanz/BicycleGAN), [AugmentedCycleGAN](https://arxiv.org/abs/1802.10151), [MUNIT](https://arxiv.org/abs/1804.04732), [DRIT](https://arxiv.org/pdf/1808.00948.pdf), etc. + +#### Experiment details (e.g., BW->color) ([#306](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/306)) +You can find more training details and hyperparameter settings in the appendices of the [CycleGAN](https://arxiv.org/abs/1703.10593) and [pix2pix](https://arxiv.org/abs/1611.07004) papers. + +#### Results with [Cycada](https://arxiv.org/pdf/1711.03213.pdf) +We generated the [result of translating GTA images to Cityscapes-style images](https://junyanz.github.io/CycleGAN/) using our Torch repo. Our PyTorch and Torch implementations seem to produce slightly different results, although we have not measured the FCN score using the PyTorch-trained model. To reproduce the result of Cycada, please use the Torch repo for now. diff --git a/docs/tips.md b/docs/tips.md new file mode 100755 index 00000000..5d827a10 --- /dev/null +++ b/docs/tips.md @@ -0,0 +1,26 @@ +## Training/test Tips +#### Training/test options +Please see `options/train_options.py` and `options/base_options.py` for the training flags; see `options/test_options.py` and `options/base_options.py` for the test flags. There are some model-specific flags as well, which are added in the model files, such as the `--lambda_A` option in `models/cycle_gan_model.py`. The default values of these options are also adjusted in the model files. +#### CPU/GPU (default `--gpu_ids 0`) +Please set `--gpu_ids -1` to use CPU mode; set `--gpu_ids 0,1,2` for multi-GPU mode. You need a large batch size (e.g. `--batch_size 32`) to benefit from multiple GPUs.
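As a rough illustration of how the `--gpu_ids` flag maps onto devices, here is a minimal sketch that follows the convention used in `models/base_model.py` (tensors go on the first listed GPU, or on the CPU when no valid GPU id is given). The `device_from_gpu_ids` helper is only an assumption for illustration; it is not the repository's actual option parser.
```python
import torch

def device_from_gpu_ids(gpu_ids_str):
    """Hypothetical helper: parse a --gpu_ids string such as '0,1,2' or '-1'."""
    ids = [int(x) for x in gpu_ids_str.split(',') if int(x) >= 0]
    # base_model.py places tensors on the first listed GPU, or on the CPU
    # when the list is empty (i.e. --gpu_ids -1).
    device = torch.device('cuda:{}'.format(ids[0])) if ids else torch.device('cpu')
    return ids, device

gpu_ids, device = device_from_gpu_ids('0,1,2')   # multi-GPU run: tensors live on cuda:0
cpu_ids, cpu_device = device_from_gpu_ids('-1')  # CPU mode
```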
+ +#### Visualization +During training, the current results can be viewed using two methods. First, if you set `--display_id` > 0, the results and loss plot will appear on a local graphics web server launched by [visdom](https://github.com/facebookresearch/visdom). To do this, you should have `visdom` installed and a server running via the command `python -m visdom.server`. The default server URL is `http://localhost:8097`. `display_id` corresponds to the window ID that is displayed on the `visdom` server. The `visdom` display functionality is turned on by default. To avoid the extra overhead of communicating with `visdom`, set `--display_id -1`. Second, the intermediate results are saved to `[opt.checkpoints_dir]/[opt.name]/web/` as an HTML file. To avoid this, set `--no_html`. + +#### Preprocessing + Images can be resized and cropped in different ways using the `--resize_or_crop` option. The default option `'resize_and_crop'` resizes the image to be of size `(opt.loadSize, opt.loadSize)` and does a random crop of size `(opt.fineSize, opt.fineSize)`. `'crop'` skips the resizing step and only performs random cropping. `'scale_width'` resizes the image to have width `opt.fineSize` while keeping the aspect ratio. `'scale_width_and_crop'` first resizes the image to have width `opt.loadSize` and then does random cropping of size `(opt.fineSize, opt.fineSize)`. `'none'` tries to skip all these preprocessing steps. However, if the image size is not a multiple of some number that depends on the number of downsamplings in the generator, you will get an error because the size of the output image may be different from the size of the input image. Therefore, the `'none'` option still tries to adjust the image size to be a multiple of 4. You might need a bigger adjustment if you change the generator architecture. Please see `data/base_dataset.py` to see how all of these are implemented. + +#### Fine-tuning/resume training +To fine-tune a pre-trained model, or resume the previous training, use the `--continue_train` flag. The program will then load the model based on `epoch`. By default, the program will initialize the epoch count as 1. Set `--epoch_count` to specify a different starting epoch count. + +#### About image size + Since the generator architecture in CycleGAN involves a series of downsampling / upsampling operations, the sizes of the input and output images may not match if the input image size is not a multiple of 4. As a result, you may get a runtime error because the L1 identity loss cannot be enforced with images of different sizes. Therefore, we slightly resize the image to be a multiple of 4 even with the `--resize_or_crop none` option. For the same reason, `--fineSize` needs to be a multiple of 4. + +#### Training/Testing with high-resolution images +CycleGAN is quite memory-intensive as four networks (two generators and two discriminators) need to be loaded on one GPU, so a large image cannot be entirely loaded. In this case, we recommend training with cropped images. For example, to generate 1024px results, you can train with `--resize_or_crop scale_width_and_crop --loadSize 1024 --fineSize 360`, and test with `--resize_or_crop scale_width --fineSize 1024`. This makes sure that training and testing are done at the same scale. At test time, you can afford a higher resolution because you don’t need to load all the networks. + +#### About loss curve +Unfortunately, the loss curve does not reveal much information in training GANs, and CycleGAN is no exception.
To check whether the training has converged or not, we recommend periodically generating a few samples and looking at them. + +#### About batch size +For all experiments in the paper, we set the batch size to be 1. If there is room for memory, you can use higher batch size with batch norm or instance norm. (Note that the default batchnorm does not work well with multi-GPU training. You may consider using [synchronized batchnorm](https://github.com/vacancy/Synchronized-BatchNorm-PyTorch) instead). But please be aware that it can impact the training. In particular, even with Instance Normalization, different batch sizes can lead to different results. Moreover, increasing `--fineSize` may be a good alternative to increasing the batch size. diff --git a/environment.yml b/environment.yml new file mode 100755 index 00000000..f7f382a6 --- /dev/null +++ b/environment.yml @@ -0,0 +1,14 @@ +name: pytorch-CycleGAN-and-pix2pix +channels: +- peterjc123 +- defaults +dependencies: +- python=3.5.5 +- pytorch=0.4 +- scipy +- pip: + - dominate==2.3.1 + - git+https://github.com/pytorch/vision.git + - Pillow==5.0.0 + - numpy==1.14.1 + - visdom==0.1.7 diff --git a/models/__init__.py b/models/__init__.py new file mode 100755 index 00000000..4d920917 --- /dev/null +++ b/models/__init__.py @@ -0,0 +1,39 @@ +import importlib +from models.base_model import BaseModel + + +def find_model_using_name(model_name): + # Given the option --model [modelname], + # the file "models/modelname_model.py" + # will be imported. + model_filename = "models." + model_name + "_model" + modellib = importlib.import_module(model_filename) + + # In the file, the class called ModelNameModel() will + # be instantiated. It has to be a subclass of BaseModel, + # and it is case-insensitive. + model = None + target_model_name = model_name.replace('_', '') + 'model' + for name, cls in modellib.__dict__.items(): + if name.lower() == target_model_name.lower() \ + and issubclass(cls, BaseModel): + model = cls + + if model is None: + print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name)) + exit(0) + + return model + + +def get_option_setter(model_name): + model_class = find_model_using_name(model_name) + return model_class.modify_commandline_options + + +def create_model(opt): + model = find_model_using_name(opt.model) + instance = model() + instance.initialize(opt) + print("model [%s] was created" % (instance.name())) + return instance diff --git a/models/__init__.pyc b/models/__init__.pyc new file mode 100755 index 00000000..b7344327 Binary files /dev/null and b/models/__init__.pyc differ diff --git a/models/base_model.py b/models/base_model.py new file mode 100755 index 00000000..b98ea27d --- /dev/null +++ b/models/base_model.py @@ -0,0 +1,159 @@ +import os +import torch +from collections import OrderedDict +from . 
import networks + + +class BaseModel(): + + # modify parser to add command line options, + # and also change the default values if needed + @staticmethod + def modify_commandline_options(parser, is_train): + return parser + + def name(self): + return 'BaseModel' + + def initialize(self, opt): + self.opt = opt + self.gpu_ids = opt.gpu_ids + self.isTrain = opt.isTrain + self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') + self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) + if opt.resize_or_crop != 'scale_width': + torch.backends.cudnn.benchmark = True + self.loss_names = [] + self.model_names = [] + self.visual_names = [] + self.image_paths = [] + + def set_input(self, input): + self.input = input + + def forward(self): + pass + + # load and print networks; create schedulers + def setup(self, opt, parser=None): + if self.isTrain: + self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] + + if not self.isTrain or opt.continue_train: + self.load_networks(opt.epoch) + self.print_networks(opt.verbose) + + # make models eval mode during test time + def eval(self): + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, 'net' + name) + net.eval() + + # used in test time, wrapping `forward` in no_grad() so we don't save + # intermediate steps for backprop + def test(self): + with torch.no_grad(): + self.forward() + + # get image paths + def get_image_paths(self): + return self.image_paths + + def optimize_parameters(self): + pass + + # update learning rate (called once every epoch) + def update_learning_rate(self): + for scheduler in self.schedulers: + scheduler.step() + lr = self.optimizers[0].param_groups[0]['lr'] + print('learning rate = %.7f' % lr) + + # return visualization images. train.py will display these images, and save the images to a html + def get_current_visuals(self): + visual_ret = OrderedDict() + for name in self.visual_names: + if isinstance(name, str): + visual_ret[name] = getattr(self, name) + return visual_ret + + # return traning losses/errors. train.py will print out these errors as debugging information + def get_current_losses(self): + errors_ret = OrderedDict() + for name in self.loss_names: + if isinstance(name, str): + # float(...) 
works for both scalar tensor and float number + errors_ret[name] = float(getattr(self, 'loss_' + name)) + return errors_ret + + # save models to the disk + def save_networks(self, epoch): + for name in self.model_names: + if isinstance(name, str): + save_filename = '%s_net_%s.pth' % (epoch, name) + save_path = os.path.join(self.save_dir, save_filename) + net = getattr(self, 'net' + name) + + if len(self.gpu_ids) > 0 and torch.cuda.is_available(): + torch.save(net.module.cpu().state_dict(), save_path) + net.cuda(self.gpu_ids[0]) + else: + torch.save(net.cpu().state_dict(), save_path) + + def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): + key = keys[i] + if i + 1 == len(keys): # at the end, pointing to a parameter/buffer + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'running_mean' or key == 'running_var'): + if getattr(module, key) is None: + state_dict.pop('.'.join(keys)) + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'num_batches_tracked'): + state_dict.pop('.'.join(keys)) + else: + self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) + + # load models from the disk + def load_networks(self, epoch): + for name in self.model_names: + if isinstance(name, str): + load_filename = '%s_net_%s.pth' % (epoch, name) + load_path = os.path.join(self.save_dir, load_filename) + net = getattr(self, 'net' + name) + if isinstance(net, torch.nn.DataParallel): + net = net.module + print('loading the model from %s' % load_path) + # if you are using PyTorch newer than 0.4 (e.g., built from + # GitHub source), you can remove str() on self.device + state_dict = torch.load(load_path, map_location=str(self.device)) + if hasattr(state_dict, '_metadata'): + del state_dict._metadata + + # patch InstanceNorm checkpoints prior to 0.4 + for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop + self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) + net.load_state_dict(state_dict) + + # print network information + def print_networks(self, verbose): + print('---------- Networks initialized -------------') + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, 'net' + name) + num_params = 0 + for param in net.parameters(): + num_params += param.numel() + if verbose: + print(net) + print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) + print('-----------------------------------------------') + + # set requies_grad=Fasle to avoid computation + def set_requires_grad(self, nets, requires_grad=False): + if not isinstance(nets, list): + nets = [nets] + for net in nets: + if net is not None: + for param in net.parameters(): + param.requires_grad = requires_grad diff --git a/models/base_model.pyc b/models/base_model.pyc new file mode 100755 index 00000000..3fee66b1 Binary files /dev/null and b/models/base_model.pyc differ diff --git a/models/cycle_gan_model.py b/models/cycle_gan_model.py new file mode 100755 index 00000000..825f90d0 --- /dev/null +++ b/models/cycle_gan_model.py @@ -0,0 +1,149 @@ +import torch +import itertools +from util.image_pool import ImagePool +from .base_model import BaseModel +from . 
import networks + + +class CycleGANModel(BaseModel): + def name(self): + return 'CycleGANModel' + + @staticmethod + def modify_commandline_options(parser, is_train=True): + # default CycleGAN did not use dropout + parser.set_defaults(no_dropout=True) + if is_train: + parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)') + parser.add_argument('--lambda_B', type=float, default=10.0, + help='weight for cycle loss (B -> A -> B)') + parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1') + + return parser + + def initialize(self, opt): + BaseModel.initialize(self, opt) + + # specify the training losses you want to print out. The program will call base_model.get_current_losses + self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B'] + # specify the images you want to save/display. The program will call base_model.get_current_visuals + visual_names_A = ['real_A', 'fake_B', 'rec_A'] + visual_names_B = ['real_B', 'fake_A', 'rec_B'] + if self.isTrain and self.opt.lambda_identity > 0.0: + visual_names_A.append('idt_A') + visual_names_B.append('idt_B') + + self.visual_names = visual_names_A + visual_names_B + # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks + if self.isTrain: + self.model_names = ['G_A', 'G_B', 'D_A', 'D_B'] + else: # during test time, only load Gs + self.model_names = ['G_A', 'G_B'] + + # load/define networks + # The naming conversion is different from those used in the paper + # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X) + self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, + not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, depth=18) + self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm, + not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, depth=18) + + if self.isTrain: + use_sigmoid = opt.no_lsgan + self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD, + opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids) + self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD, + opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids) + + if self.isTrain: + self.fake_A_pool = ImagePool(opt.pool_size) + self.fake_B_pool = ImagePool(opt.pool_size) + # define loss functions + self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device) + self.criterionCycle = torch.nn.L1Loss() + self.criterionIdt = torch.nn.L1Loss() + # initialize optimizers + self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), + lr=opt.lr, betas=(opt.beta1, 0.999)) + self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), + lr=opt.lr, betas=(opt.beta1, 0.999)) + self.optimizers = [] + self.optimizers.append(self.optimizer_G) + self.optimizers.append(self.optimizer_D) + + def set_input(self, input): + AtoB = self.opt.direction == 'AtoB' + self.real_A = input['A' if AtoB else 'B'].to(self.device) + self.real_B = input['B' if AtoB else 'A'].to(self.device) + self.image_paths = 
input['A_paths' if AtoB else 'B_paths'] + + def forward(self): + self.fake_B = self.netG_A(self.real_A) + self.rec_A = self.netG_B(self.fake_B) + + self.fake_A = self.netG_B(self.real_B) + self.rec_B = self.netG_A(self.fake_A) + + def backward_D_basic(self, netD, real, fake): + # Real + pred_real = netD(real) + loss_D_real = self.criterionGAN(pred_real, True) + # Fake + pred_fake = netD(fake.detach()) + loss_D_fake = self.criterionGAN(pred_fake, False) + # Combined loss + loss_D = (loss_D_real + loss_D_fake) * 0.5 + # backward + loss_D.backward() + return loss_D + + def backward_D_A(self): + fake_B = self.fake_B_pool.query(self.fake_B) + self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B) + + def backward_D_B(self): + fake_A = self.fake_A_pool.query(self.fake_A) + self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A) + + def backward_G(self): + lambda_idt = self.opt.lambda_identity + lambda_A = self.opt.lambda_A + lambda_B = self.opt.lambda_B + # Identity loss + if lambda_idt > 0: + # G_A should be identity if real_B is fed. + self.idt_A = self.netG_A(self.real_B) + self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt + # G_B should be identity if real_A is fed. + self.idt_B = self.netG_B(self.real_A) + self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt + else: + self.loss_idt_A = 0 + self.loss_idt_B = 0 + + # GAN loss D_A(G_A(A)) + self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True) + # GAN loss D_B(G_B(B)) + self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True) + # Forward cycle loss + self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A + # Backward cycle loss + self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B + # combined loss + self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B + self.loss_G.backward() + + def optimize_parameters(self): + # forward + self.forward() + # G_A and G_B + self.set_requires_grad([self.netD_A, self.netD_B], False) + self.optimizer_G.zero_grad() + self.backward_G() + self.optimizer_G.step() + # D_A and D_B + self.set_requires_grad([self.netD_A, self.netD_B], True) + self.optimizer_D.zero_grad() + self.backward_D_A() + self.backward_D_B() + self.optimizer_D.step() diff --git a/models/cycle_gan_model.pyc b/models/cycle_gan_model.pyc new file mode 100755 index 00000000..fa9a26e4 Binary files /dev/null and b/models/cycle_gan_model.pyc differ diff --git a/models/networks.py b/models/networks.py new file mode 100755 index 00000000..06c55ce4 --- /dev/null +++ b/models/networks.py @@ -0,0 +1,913 @@ +import torch +import torch.nn as nn +from torch.nn import init +import functools +from torch.optim import lr_scheduler + +import torch.nn.functional as F + +import math +import torch.utils.model_zoo as model_zoo + +############################################################################### +# Helper Functions +############################################################################### + + +def get_norm_layer(norm_type='instance'): + if norm_type == 'batch': + norm_layer = functools.partial(nn.BatchNorm2d, affine=True) + elif norm_type == 'instance': + norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) + elif norm_type == 'none': + norm_layer = None + else: + raise NotImplementedError('normalization layer [%s] is not found' % norm_type) + return norm_layer + + +def 
get_scheduler(optimizer, opt): + if opt.lr_policy == 'lambda': + def lambda_rule(epoch): + lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1) + return lr_l + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) + elif opt.lr_policy == 'step': + scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1) + elif opt.lr_policy == 'plateau': + scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5) + elif opt.lr_policy == 'cosine': + scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0) + else: + raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy) + return scheduler + + +def init_weights(net, init_type='normal', gain=0.02): + def init_func(m): + classname = m.__class__.__name__ + if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): + if init_type == 'normal': + init.normal_(m.weight.data, 0.0, gain) + elif init_type == 'xavier': + init.xavier_normal_(m.weight.data, gain=gain) + elif init_type == 'kaiming': + init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') + elif init_type == 'orthogonal': + init.orthogonal_(m.weight.data, gain=gain) + else: + raise NotImplementedError('initialization method [%s] is not implemented' % init_type) + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + elif classname.find('BatchNorm2d') != -1: + init.normal_(m.weight.data, 1.0, gain) + init.constant_(m.bias.data, 0.0) + + print('initialize network with %s' % init_type) + net.apply(init_func) + + +def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]): + if len(gpu_ids) > 0: + assert(torch.cuda.is_available()) + net.to(gpu_ids[0]) + net = torch.nn.DataParallel(net, gpu_ids) + init_weights(net, init_type, gain=init_gain) + return net + + +def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[], depth=18): + net = None + norm_layer = get_norm_layer(norm_type=norm) + + if netG == 'resnet_9blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9) + elif netG == 'resnet_6blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6) + elif netG == 'resnet_fpn': + # Create the FPN-based model + if depth == 18: + net = resnet18(pretrained=False) + print('define_G: using the resnet_fpn generator with a ResNet-18 backbone') + # netG_B2A = resnet18(pretrained=False) + elif depth == 34: + net = resnet34(pretrained=False) + # netG_B2A = resnet34(pretrained=False) + elif depth == 50: + net = resnet50(pretrained=False) + # netG_B2A = resnet50(pretrained=False) + # net = Resnet(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6) + elif netG == 'unet_128': + net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + elif netG == 'unet_256': + net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + else: + raise NotImplementedError('Generator model name [%s] is not recognized' % netG) + return init_net(net, init_type, init_gain, gpu_ids) + + +def define_D(input_nc, ndf, netD, + n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', init_gain=0.02, gpu_ids=[]): + net = None + norm_layer = get_norm_layer(norm_type=norm) + + if netD == 'basic': + net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer,
use_sigmoid=use_sigmoid) + elif netD == 'n_layers': + net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid) + elif netD == 'pixel': + net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid) + else: + raise NotImplementedError('Discriminator model name [%s] is not recognized' % net) + return init_net(net, init_type, init_gain, gpu_ids) + + +############################################################################## +# Classes +############################################################################## + + +# Defines the GAN loss which uses either LSGAN or the regular GAN. +# When LSGAN is used, it is basically same as MSELoss, +# but it abstracts away the need to create the target label tensor +# that has the same size as the input +class GANLoss(nn.Module): + def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0): + super(GANLoss, self).__init__() + self.register_buffer('real_label', torch.tensor(target_real_label)) + self.register_buffer('fake_label', torch.tensor(target_fake_label)) + if use_lsgan: + self.loss = nn.MSELoss() + else: + self.loss = nn.BCELoss() + + def get_target_tensor(self, input, target_is_real): + if target_is_real: + target_tensor = self.real_label + else: + target_tensor = self.fake_label + return target_tensor.expand_as(input) + + def __call__(self, input, target_is_real): + target_tensor = self.get_target_tensor(input, target_is_real) + return self.loss(input, target_tensor) + + +# Defines the generator that consists of Resnet blocks between a few +# downsampling/upsampling operations. +# Code and idea originally from Justin Johnson's architecture. +# https://github.com/jcjohnson/fast-neural-style/ +class ResnetGenerator(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'): + assert(n_blocks >= 0) + super(ResnetGenerator, self).__init__() + self.input_nc = input_nc + self.output_nc = output_nc + self.ngf = ngf + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, + bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): + mult = 2**i + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, + stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + + mult = 2**n_downsampling + for i in range(n_blocks): + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): + mult = 2**(n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model = nn.Sequential(*model) + + def forward(self, input): + return self.model(input) + + +# Define a resnet block +class ResnetBlock(nn.Module): + def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): + super(ResnetBlock, self).__init__() + self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) + + def build_conv_block(self, dim, 
padding_type, norm_layer, use_dropout, use_bias): + conv_block = [] + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), + norm_layer(dim), + nn.ReLU(True)] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), + norm_layer(dim)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + out = x + self.conv_block(x) + return out + + +# Define a resnet block +class ResnetBlock_sam(nn.Module): + def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): + super(ResnetBlock_sam, self).__init__() + self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) + + def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): + conv_block = [] + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), + norm_layer(dim), + nn.ReLU(True)] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), + norm_layer(dim)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + out = x + self.conv_block(x) + return out + + +# Defines the Unet generator. +# |num_downs|: number of downsamplings in UNet. 
For example, +# if |num_downs| == 7, image of size 128x128 will become of size 1x1 +# at the bottleneck +class UnetGenerator(nn.Module): + def __init__(self, input_nc, output_nc, num_downs, ngf=64, + norm_layer=nn.BatchNorm2d, use_dropout=False): + super(UnetGenerator, self).__init__() + + # construct unet structure + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) + for i in range(num_downs - 5): + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout) + unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) + + self.model = unet_block + + def forward(self, input): + return self.model(input) + + +# Defines the submodule with skip connection. +# X -------------------identity---------------------- X +# |-- downsampling -- |submodule| -- upsampling --| +class UnetSkipConnectionBlock(nn.Module): + def __init__(self, outer_nc, inner_nc, input_nc=None, + submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): + super(UnetSkipConnectionBlock, self).__init__() + self.outermost = outermost + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + if input_nc is None: + input_nc = outer_nc + downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, + stride=2, padding=1, bias=use_bias) + downrelu = nn.LeakyReLU(0.2, True) + downnorm = norm_layer(inner_nc) + uprelu = nn.ReLU(True) + upnorm = norm_layer(outer_nc) + + if outermost: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1) + down = [downconv] + up = [uprelu, upconv, nn.Tanh()] + model = down + [submodule] + up + elif innermost: + upconv = nn.ConvTranspose2d(inner_nc, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv] + up = [uprelu, upconv, upnorm] + model = down + up + else: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv, downnorm] + up = [uprelu, upconv, upnorm] + + if use_dropout: + model = down + [submodule] + up + [nn.Dropout(0.5)] + else: + model = down + [submodule] + up + + self.model = nn.Sequential(*model) + + def forward(self, x): + if self.outermost: + return self.model(x) + else: + return torch.cat([x, self.model(x)], 1) + + +# Defines the PatchGAN discriminator with the specified arguments. 
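+# With the default n_layers=3, the stack below applies five 4x4 convolutions with strides
+# (2, 2, 2, 1, 1), so each value in the final 1-channel output map has a 70x70 receptive
+# field on the input -- the "70x70 PatchGAN" used in pix2pix. The receptive field follows
+# from iterating rf += (kernel - 1) * jump and jump *= stride over the layers:
+# 1 -> 4 -> 10 -> 22 -> 46 -> 70. The discriminator therefore scores overlapping input
+# patches as real or fake rather than the whole image, which keeps the parameter count
+# independent of the input resolution.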
+class NLayerDiscriminator(nn.Module): + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False): + super(NLayerDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + kw = 4 + padw = 1 + sequence = [ + nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), + nn.LeakyReLU(0.2, True) + ] + + nf_mult = 1 + nf_mult_prev = 1 + for n in range(1, n_layers): + nf_mult_prev = nf_mult + nf_mult = min(2**n, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, + kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + nf_mult_prev = nf_mult + nf_mult = min(2**n_layers, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, + kernel_size=kw, stride=1, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] + + if use_sigmoid: + sequence += [nn.Sigmoid()] + + self.model = nn.Sequential(*sequence) + + def forward(self, input): + return self.model(input) + + +class PixelDiscriminator(nn.Module): + def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False): + super(PixelDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + self.net = [ + nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias), + norm_layer(ndf * 2), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)] + + if use_sigmoid: + self.net.append(nn.Sigmoid()) + + self.net = nn.Sequential(*self.net) + + def forward(self, input): + return self.net(input) + + +######## SAM ######### + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=0, bias=True) + +class BasicBlock_orj(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock_orj, self).__init__() + self.rp1 = nn.ReflectionPad2d(1) + self.conv1 = conv3x3(inplanes, planes, stride) + self.in1 = nn.InstanceNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.rp2 = nn.ReflectionPad2d(1) + self.conv2 = conv3x3(planes, planes) + self.in2 = nn.InstanceNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + + residual = x + + out = self.rp1(x) + out = self.conv1(out) + out = self.in1(out) + out = self.relu(out) + + out = self.rp2(out) + out = self.conv2(out) + out = self.in2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class BasicBlock_sam(nn.Module): + expansion = 1 + + def __init__(self, in_planes, planes, stride=1): + 
super(BasicBlock_sam, self).__init__() + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn1 = nn.InstanceNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn2 = nn.InstanceNorm2d(planes) + self.out_planes = planes + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), + nn.InstanceNorm2d(self.expansion*planes) + ) + + self.final_conv = nn.Sequential( + nn.ReflectionPad2d(1), + nn.Conv2d(self.expansion * planes * 2, self.expansion * planes, kernel_size=3, stride=1, + padding=0, bias=False), + nn.InstanceNorm2d(self.expansion * planes) + ) + else: + self.final_conv = nn.Sequential( + nn.ReflectionPad2d(1), + nn.Conv2d(planes*2, planes, kernel_size=3, stride=1, padding=0, bias=False), + nn.InstanceNorm2d(planes) + ) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.bn2(self.conv2(out)) + inputt = self.shortcut(x) + catted = torch.cat((out, inputt), 1) + #out = F.relu(out) + out = self.final_conv(catted) + out = F.relu(out) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True) + self.bn1 = nn.InstanceNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + bias=True) + self.bn2 = nn.InstanceNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=True) + self.bn3 = nn.InstanceNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class PyramidFeatures(nn.Module): + def __init__(self, C2_size, C3_size, C4_size, C5_size, feature_size=128): + super(PyramidFeatures, self).__init__() + + # upsample C5 to get P5 from the FPN paper + self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0) + self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest') + #self.rp1 = nn.ReflectionPad2d(1) + #self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=0) + + # add P5 elementwise to C4 + self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0) + self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest') + #self.rp2 = nn.ReflectionPad2d(1) + #self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=0) + + # add P4 elementwise to C3 + self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0) + self.P3_upsampled = nn.Upsample(scale_factor=2, mode='nearest') + #self.rp3 = nn.ReflectionPad2d(1) + #self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=0) + + self.P2_1 = nn.Conv2d(C2_size, feature_size, kernel_size=1, stride=1, padding=0) + self.P2_upsampled = nn.Upsample(scale_factor=2, mode='nearest') + self.rp4 = nn.ReflectionPad2d(1) + self.P2_2 = nn.Conv2d(feature_size, feature_size/2, kernel_size=3, stride=1, 
padding=0) + + #self.P1_1 = nn.Conv2d(feature_size, feature_size, kernel_size=1, stride=1, padding=0) + #self.P1_upsampled = nn.Upsample(scale_factor=2, mode='nearest') + #self.rp5 = nn.ReflectionPad2d(1) + #self.P1_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=0) + + def forward(self, inputs): + + C2, C3, C4, C5 = inputs + + P5_x = self.P5_1(C5) + P5_upsampled_x = self.P5_upsampled(P5_x) + #P5_x = self.rp1(P5_x) + # #P5_x = self.P5_2(P5_x) + + P4_x = self.P4_1(C4) + P4_x = P5_upsampled_x + P4_x + P4_upsampled_x = self.P4_upsampled(P4_x) + #P4_x = self.rp2(P4_x) + # #P4_x = self.P4_2(P4_x) + + P3_x = self.P3_1(C3) + P3_x = P3_x + P4_upsampled_x + P3_upsampled_x = self.P3_upsampled(P3_x) + #P3_x = self.rp3(P3_x) + #P3_x = self.P3_2(P3_x) + + P2_x = self.P2_1(C2) + P2_x = P2_x + P3_upsampled_x + P2_upsampled_x = self.P2_upsampled(P2_x) + P2_x = self.rp4(P2_upsampled_x) + P2_x = self.P2_2(P2_x) + + #P1_x = self.P1_1(P2_upsampled_x) + #P1_x = P1_x + P2_upsampled_x + + return P2_x + + +class ResNet(nn.Module): + + def __init__(self, block, layers): + self.inplanes = 64 + super(ResNet, self).__init__() + + # first conv + self.pad1 = nn.ReflectionPad2d(3) + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=0, bias=True) + self.in1 = nn.InstanceNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.pad2 = nn.ReflectionPad2d(1) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0) + + # Output layer + self.pad3 = nn.ReflectionPad2d(3) + self.conv2 = nn.Conv2d(64, 3, 7) + self.tanh = nn.Tanh() + + if block == BasicBlock_orj: + # residuals + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 128, layers[2], stride=2) + self.layer4 = self._make_layer(block, 256, layers[3], stride=2) + + fpn_sizes = [self.layer1[layers[0] - 1].conv2.out_channels, + self.layer2[layers[1] - 1].conv2.out_channels, + self.layer3[layers[2] - 1].conv2.out_channels, + self.layer4[layers[3] - 1].conv2.out_channels] + + elif block == BasicBlock_sam: + # residuals + self.layer1 = self._make_layer_sam(block, 64, layers[0]) + self.layer2 = self._make_layer_sam(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer_sam(block, 128, layers[2], stride=2) + self.layer4 = self._make_layer_sam(block, 256, layers[3], stride=2) + + fpn_sizes = [self.layer1[layers[0] - 1].conv2.out_channels, + self.layer2[layers[1] - 1].conv2.out_channels, + self.layer3[layers[2] - 1].conv2.out_channels, + self.layer4[layers[3] - 1].conv2.out_channels] + + elif block == Bottleneck: + # residuals + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 128, layers[2], stride=2) + self.layer4 = self._make_layer(block, 256, layers[3], stride=2) + + fpn_sizes = [self.layer1[layers[0] - 1].conv3.out_channels, + self.layer2[layers[1] - 1].conv3.out_channels, + self.layer3[layers[2] - 1].conv3.out_channels, + self.layer4[layers[3] - 1].conv3.out_channels] + + self.fpn = PyramidFeatures(fpn_sizes[0], fpn_sizes[1], fpn_sizes[2], fpn_sizes[3]) + + #for m in self.modules(): + # if isinstance(m, nn.Conv2d): + # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + # m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + # elif isinstance(m, nn.BatchNorm2d): + # m.weight.data.fill_(1) + # m.bias.data.zero_() + + # self.freeze_bn() + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=True), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def _make_layer_sam(self, block, planes, blocks, stride=1): + strides = [stride] + [1] * (blocks - 1) + layers = [] + for stride in strides: + layers.append(block(self.inplanes, planes, stride)) + self.inplanes = planes * block.expansion + return nn.Sequential(*layers) + + def freeze_bn(self): + '''Freeze BatchNorm layers.''' + for layer in self.modules(): + if isinstance(layer, nn.BatchNorm2d): + layer.eval() + + def forward(self, inputs): + + img_batch = inputs + + x = self.pad1(img_batch) + x = self.conv1(x) + x = self.in1(x) + x = self.relu(x) + x = self.pad2(x) + x = self.maxpool(x) + + x1 = self.layer1(x) + x2 = self.layer2(x1) + x3 = self.layer3(x2) + x4 = self.layer4(x3) + + out = self.fpn([x1, x2, x3, x4]) + + out = self.pad3(out) + out = self.conv2(out) + out = self.tanh(out) + + return out + + +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock_sam, [2, 2, 2, 2], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False) + return model + + +def resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock_sam, [3, 4, 6, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False) + return model + + +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], model_dir='.'), strict=False) + return model + + +# def resnet101(num_classes, pretrained=False, **kwargs): +# """Constructs a ResNet-101 model. +# Args: +# pretrained (bool): If True, returns a model pre-trained on ImageNet +# """ +# model = ResNet(num_classes, Bottleneck, [3, 4, 23, 3], **kwargs) +# if pretrained: +# model.load_state_dict(model_zoo.load_url(model_urls['resnet101'], model_dir='.'), strict=False) +# return model +# +# +# def resnet152(num_classes, pretrained=False, **kwargs): +# """Constructs a ResNet-152 model. 
+# Args: +# pretrained (bool): If True, returns a model pre-trained on ImageNet +# """ +# model = ResNet(num_classes, Bottleneck, [3, 8, 36, 3], **kwargs) +# if pretrained: +# model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir='.'), strict=False) +# return model + + +#### ORJ MODELS ###### +class ResidualBlock(nn.Module): + def __init__(self, in_features): + super(ResidualBlock, self).__init__() + + conv_block = [ nn.ReflectionPad2d(1), + nn.Conv2d(in_features, in_features, 3), + nn.InstanceNorm2d(in_features), + nn.ReLU(inplace=True), + nn.ReflectionPad2d(1), + nn.Conv2d(in_features, in_features, 3), + nn.InstanceNorm2d(in_features) ] + + self.conv_block = nn.Sequential(*conv_block) + + def forward(self, x): + return x + self.conv_block(x) + +class Generator(nn.Module): + def __init__(self, input_nc, output_nc, n_residual_blocks=9): + super(Generator, self).__init__() + + # Initial convolution block + model = [ nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, 64, 7), + nn.InstanceNorm2d(64), + nn.ReLU(inplace=True) ] + + # Downsampling + in_features = 64 + out_features = in_features*2 + for _ in range(2): + model += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1), + nn.InstanceNorm2d(out_features), + nn.ReLU(inplace=True) ] + in_features = out_features + out_features = in_features*2 + + # Residual blocks + for _ in range(n_residual_blocks): + model += [ResidualBlock(in_features)] + + # Upsampling + out_features = in_features//2 + for _ in range(2): + model += [ nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1), + nn.InstanceNorm2d(out_features), + nn.ReLU(inplace=True) ] + in_features = out_features + out_features = in_features//2 + + # Output layer + model += [ nn.ReflectionPad2d(3), + nn.Conv2d(64, output_nc, 7), + nn.Tanh() ] + + self.model = nn.Sequential(*model) + + def forward(self, x): + return self.model(x) + +class Discriminator(nn.Module): + def __init__(self, input_nc): + super(Discriminator, self).__init__() + + # A bunch of convolutions one after another + model = [ nn.Conv2d(input_nc, 64, 4, stride=2, padding=1), + nn.LeakyReLU(0.2, inplace=True) ] + + model += [ nn.Conv2d(64, 128, 4, stride=2, padding=1), + nn.InstanceNorm2d(128), + nn.LeakyReLU(0.2, inplace=True) ] + + model += [ nn.Conv2d(128, 256, 4, stride=2, padding=1), + nn.InstanceNorm2d(256), + nn.LeakyReLU(0.2, inplace=True) ] + + model += [ nn.Conv2d(256, 512, 4, padding=1), + nn.InstanceNorm2d(512), + nn.LeakyReLU(0.2, inplace=True) ] + + # FCN classification layer + model += [nn.Conv2d(512, 1, 4, padding=1)] + + self.model = nn.Sequential(*model) + + def forward(self, x): + x = self.model(x) + # Average pooling and flatten + return F.avg_pool2d(x, x.size()[2:]).view(x.size()[0], -1) diff --git a/models/networks.pyc b/models/networks.pyc new file mode 100755 index 00000000..73014094 Binary files /dev/null and b/models/networks.pyc differ diff --git a/models/networks.py~ b/models/networks.py~ new file mode 100755 index 00000000..7858d0f6 --- /dev/null +++ b/models/networks.py~ @@ -0,0 +1,845 @@ +import torch +import torch.nn as nn +from torch.nn import init +import functools +from torch.optim import lr_scheduler + +import torch.nn.functional as F + +import math +import torch.utils.model_zoo as model_zoo + +############################################################################### +# Helper Functions +############################################################################### + + +def get_norm_layer(norm_type='instance'): + if 
norm_type == 'batch': + norm_layer = functools.partial(nn.BatchNorm2d, affine=True) + elif norm_type == 'instance': + norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) + elif norm_type == 'none': + norm_layer = None + else: + raise NotImplementedError('normalization layer [%s] is not found' % norm_type) + return norm_layer + + +def get_scheduler(optimizer, opt): + if opt.lr_policy == 'lambda': + def lambda_rule(epoch): + lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1) + return lr_l + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) + elif opt.lr_policy == 'step': + scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1) + elif opt.lr_policy == 'plateau': + scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5) + elif opt.lr_policy == 'cosine': + scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0) + else: + return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy) + return scheduler + + +def init_weights(net, init_type='normal', gain=0.02): + def init_func(m): + classname = m.__class__.__name__ + if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): + if init_type == 'normal': + init.normal_(m.weight.data, 0.0, gain) + elif init_type == 'xavier': + init.xavier_normal_(m.weight.data, gain=gain) + elif init_type == 'kaiming': + init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') + elif init_type == 'orthogonal': + init.orthogonal_(m.weight.data, gain=gain) + else: + raise NotImplementedError('initialization method [%s] is not implemented' % init_type) + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + elif classname.find('BatchNorm2d') != -1: + init.normal_(m.weight.data, 1.0, gain) + init.constant_(m.bias.data, 0.0) + + print('initialize network with %s' % init_type) + net.apply(init_func) + + +def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]): + if len(gpu_ids) > 0: + assert(torch.cuda.is_available()) + net.to(gpu_ids[0]) + net = torch.nn.DataParallel(net, gpu_ids) + init_weights(net, init_type, gain=init_gain) + return net + + +def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[], depth=18): + net = None + norm_layer = get_norm_layer(norm_type=norm) + + if netG == 'resnet_9blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9) + elif netG == 'resnet_6blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6) + elif netG == 'resnet_fpn': + # Create the model + if depth == 18: + net = resnet18(pretrained=False) + print "EVET" + # netG_B2A = resnet18(pretrained=False) + elif depth == 34: + net = resnet34(pretrained=False) + # netG_B2A = resnet34(pretrained=False) + elif depth == 50: + net = resnet50(pretrained=False) + # netG_B2A = resnet50(pretrained=False) + # net = Resnet(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6) + elif netG == 'unet_128': + net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + elif netG == 'unet_256': + net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + else: + raise NotImplementedError('Generator model name [%s] is 
not recognized' % netG) + return init_net(net, init_type, init_gain, gpu_ids) + + +def define_D(input_nc, ndf, netD, + n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', init_gain=0.02, gpu_ids=[]): + net = None + norm_layer = get_norm_layer(norm_type=norm) + + if netD == 'basic': + net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid) + elif netD == 'n_layers': + net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid) + elif netD == 'pixel': + net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid) + else: + raise NotImplementedError('Discriminator model name [%s] is not recognized' % net) + return init_net(net, init_type, init_gain, gpu_ids) + + +############################################################################## +# Classes +############################################################################## + + +# Defines the GAN loss which uses either LSGAN or the regular GAN. +# When LSGAN is used, it is basically same as MSELoss, +# but it abstracts away the need to create the target label tensor +# that has the same size as the input +class GANLoss(nn.Module): + def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0): + super(GANLoss, self).__init__() + self.register_buffer('real_label', torch.tensor(target_real_label)) + self.register_buffer('fake_label', torch.tensor(target_fake_label)) + if use_lsgan: + self.loss = nn.MSELoss() + else: + self.loss = nn.BCELoss() + + def get_target_tensor(self, input, target_is_real): + if target_is_real: + target_tensor = self.real_label + else: + target_tensor = self.fake_label + return target_tensor.expand_as(input) + + def __call__(self, input, target_is_real): + target_tensor = self.get_target_tensor(input, target_is_real) + return self.loss(input, target_tensor) + + +# Defines the generator that consists of Resnet blocks between a few +# downsampling/upsampling operations. +# Code and idea originally from Justin Johnson's architecture. 
+# https://github.com/jcjohnson/fast-neural-style/ +class ResnetGenerator(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'): + assert(n_blocks >= 0) + super(ResnetGenerator, self).__init__() + self.input_nc = input_nc + self.output_nc = output_nc + self.ngf = ngf + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, + bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): + mult = 2**i + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, + stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + + mult = 2**n_downsampling + for i in range(n_blocks): + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): + mult = 2**(n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model = nn.Sequential(*model) + + def forward(self, input): + return self.model(input) + + +# Define a resnet block +class ResnetBlock(nn.Module): + def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): + super(ResnetBlock, self).__init__() + self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) + + def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): + conv_block = [] + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), + norm_layer(dim), + nn.ReLU(True)] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), + norm_layer(dim)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + out = x + self.conv_block(x) + return out + + +# Defines the Unet generator. +# |num_downs|: number of downsamplings in UNet. 
For example, +# if |num_downs| == 7, image of size 128x128 will become of size 1x1 +# at the bottleneck +class UnetGenerator(nn.Module): + def __init__(self, input_nc, output_nc, num_downs, ngf=64, + norm_layer=nn.BatchNorm2d, use_dropout=False): + super(UnetGenerator, self).__init__() + + # construct unet structure + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) + for i in range(num_downs - 5): + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout) + unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) + + self.model = unet_block + + def forward(self, input): + return self.model(input) + + +# Defines the submodule with skip connection. +# X -------------------identity---------------------- X +# |-- downsampling -- |submodule| -- upsampling --| +class UnetSkipConnectionBlock(nn.Module): + def __init__(self, outer_nc, inner_nc, input_nc=None, + submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): + super(UnetSkipConnectionBlock, self).__init__() + self.outermost = outermost + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + if input_nc is None: + input_nc = outer_nc + downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, + stride=2, padding=1, bias=use_bias) + downrelu = nn.LeakyReLU(0.2, True) + downnorm = norm_layer(inner_nc) + uprelu = nn.ReLU(True) + upnorm = norm_layer(outer_nc) + + if outermost: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1) + down = [downconv] + up = [uprelu, upconv, nn.Tanh()] + model = down + [submodule] + up + elif innermost: + upconv = nn.ConvTranspose2d(inner_nc, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv] + up = [uprelu, upconv, upnorm] + model = down + up + else: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv, downnorm] + up = [uprelu, upconv, upnorm] + + if use_dropout: + model = down + [submodule] + up + [nn.Dropout(0.5)] + else: + model = down + [submodule] + up + + self.model = nn.Sequential(*model) + + def forward(self, x): + if self.outermost: + return self.model(x) + else: + return torch.cat([x, self.model(x)], 1) + + +# Defines the PatchGAN discriminator with the specified arguments. 
+class NLayerDiscriminator(nn.Module): + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False): + super(NLayerDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + kw = 4 + padw = 1 + sequence = [ + nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), + nn.LeakyReLU(0.2, True) + ] + + nf_mult = 1 + nf_mult_prev = 1 + for n in range(1, n_layers): + nf_mult_prev = nf_mult + nf_mult = min(2**n, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, + kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + nf_mult_prev = nf_mult + nf_mult = min(2**n_layers, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, + kernel_size=kw, stride=1, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] + + if use_sigmoid: + sequence += [nn.Sigmoid()] + + self.model = nn.Sequential(*sequence) + + def forward(self, input): + return self.model(input) + + +class PixelDiscriminator(nn.Module): + def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False): + super(PixelDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + self.net = [ + nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias), + norm_layer(ndf * 2), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)] + + if use_sigmoid: + self.net.append(nn.Sigmoid()) + + self.net = nn.Sequential(*self.net) + + def forward(self, input): + return self.net(input) + + +######## SAM ######### + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=0, bias=True) + +class BasicBlock_orj(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.rp1 = nn.ReflectionPad2d(1) + self.conv1 = conv3x3(inplanes, planes, stride) + self.in1 = nn.InstanceNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.rp2 = nn.ReflectionPad2d(1) + self.conv2 = conv3x3(planes, planes) + self.in2 = nn.InstanceNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + + residual = x + + out = self.rp1(x) + out = self.conv1(out) + out = self.in1(out) + out = self.relu(out) + + out = self.rp2(out) + out = self.conv2(out) + out = self.in2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + +class BasicBlock_sam(nn.Module): + expansion = 1 + + def __init__(self, in_planes, planes, stride=1): + 
super(BasicBlock, self).__init__() + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn1 = nn.InstanceNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn2 = nn.InstanceNorm2d(planes) + self.out_planes = planes + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), + nn.InstanceNorm2d(self.expansion*planes) + ) + + self.final_conv = nn.Sequential( + nn.ReflectionPad2d(1), + nn.Conv2d(self.expansion * planes * 2, self.expansion * planes, kernel_size=3, stride=1, + padding=0, bias=False), + nn.InstanceNorm2d(self.expansion * planes) + ) + else: + self.final_conv = nn.Sequential( + nn.ReflectionPad2d(1), + nn.Conv2d(planes*2, planes, kernel_size=3, stride=1, padding=0, bias=False), + nn.InstanceNorm2d(planes) + ) + + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.bn2(self.conv2(out)) + inputt = self.shortcut(x) + catted = torch.cat((out, inputt), 1) + #out = F.relu(out) + out = self.final_conv(catted) + out = F.relu(out) + return out + + + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True) + self.bn1 = nn.InstanceNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + bias=True) + self.bn2 = nn.InstanceNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=True) + self.bn3 = nn.InstanceNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class PyramidFeatures(nn.Module): + def __init__(self, C2_size, C3_size, C4_size, C5_size, feature_size=128): + super(PyramidFeatures, self).__init__() + + # upsample C5 to get P5 from the FPN paper + self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0) + self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest') + #self.rp1 = nn.ReflectionPad2d(1) + #self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=0) + + # add P5 elementwise to C4 + self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0) + self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest') + #self.rp2 = nn.ReflectionPad2d(1) + #self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=0) + + # add P4 elementwise to C3 + self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0) + self.P3_upsampled = nn.Upsample(scale_factor=2, mode='nearest') + #self.rp3 = nn.ReflectionPad2d(1) + #self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=0) + + self.P2_1 = nn.Conv2d(C2_size, feature_size, kernel_size=1, stride=1, padding=0) + self.P2_upsampled = nn.Upsample(scale_factor=2, mode='nearest') + self.rp4 = nn.ReflectionPad2d(1) + self.P2_2 = nn.Conv2d(feature_size, feature_size/2, kernel_size=3, stride=1, 
padding=0) + + #self.P1_1 = nn.Conv2d(feature_size, feature_size, kernel_size=1, stride=1, padding=0) + #self.P1_upsampled = nn.Upsample(scale_factor=2, mode='nearest') + #self.rp5 = nn.ReflectionPad2d(1) + #self.P1_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=0) + + def forward(self, inputs): + + C2, C3, C4, C5 = inputs + + P5_x = self.P5_1(C5) + P5_upsampled_x = self.P5_upsampled(P5_x) + #P5_x = self.rp1(P5_x) + #P5_x = self.P5_2(P5_x) + + P4_x = self.P4_1(C4) + P4_x = P5_upsampled_x + P4_x + P4_upsampled_x = self.P4_upsampled(P4_x) + #P4_x = self.rp2(P4_x) + #P4_x = self.P4_2(P4_x) + + P3_x = self.P3_1(C3) + P3_x = P3_x + P4_upsampled_x + P3_upsampled_x = self.P3_upsampled(P3_x) + #P3_x = self.rp3(P3_x) + #P3_x = self.P3_2(P3_x) + + P2_x = self.P2_1(C2) + P2_x = P2_x + P3_upsampled_x + P2_upsampled_x = self.P2_upsampled(P2_x) + P2_x = self.rp4(P2_upsampled_x) + P2_x = self.P2_2(P2_x) + + #P1_x = self.P1_1(P2_upsampled_x) + #P1_x = P1_x + P2_upsampled_x + + return P2_x + + +class ResNet(nn.Module): + + def __init__(self, block, layers): + self.inplanes = 64 + super(ResNet, self).__init__() + + # first conv + self.pad1 = nn.ReflectionPad2d(3) + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=0, bias=True) + self.in1 = nn.InstanceNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.pad2 = nn.ReflectionPad2d(1) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0) + + # Output layer + self.pad3 = nn.ReflectionPad2d(3) + self.conv2 = nn.Conv2d(64, 3, 7) + self.tanh = nn.Tanh() + + # residuals + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 128, layers[2], stride=2) + self.layer4 = self._make_layer(block, 256, layers[3], stride=2) + + if block == BasicBlock: + fpn_sizes = [self.layer1[layers[0] - 1].conv2.out_channels, + self.layer2[layers[1] - 1].conv2.out_channels, + self.layer3[layers[2] - 1].conv2.out_channels, + self.layer4[layers[3] - 1].conv2.out_channels] + elif block == Bottleneck: + fpn_sizes = [self.layer1[layers[0] - 1].conv3.out_channels, + self.layer2[layers[1] - 1].conv3.out_channels, + self.layer3[layers[2] - 1].conv3.out_channels, + self.layer4[layers[3] - 1].conv3.out_channels] + + self.fpn = PyramidFeatures(fpn_sizes[0], fpn_sizes[1], fpn_sizes[2], fpn_sizes[3]) + + #for m in self.modules(): + # if isinstance(m, nn.Conv2d): + # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + # m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + # elif isinstance(m, nn.BatchNorm2d): + # m.weight.data.fill_(1) + # m.bias.data.zero_() + + # self.freeze_bn() + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=True), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def freeze_bn(self): + '''Freeze BatchNorm layers.''' + for layer in self.modules(): + if isinstance(layer, nn.BatchNorm2d): + layer.eval() + + def forward(self, inputs): + + img_batch = inputs + + x = self.pad1(img_batch) + x = self.conv1(x) + x = self.in1(x) + x = self.relu(x) + x = self.pad2(x) + x = self.maxpool(x) + + x1 = self.layer1(x) + x2 = self.layer2(x1) + x3 = self.layer3(x2) + x4 = self.layer4(x3) + + out = self.fpn([x1, x2, x3, x4]) + + out = self.pad3(out) + out = self.conv2(out) + out = self.tanh(out) + + return out + + +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False) + return model + + +def resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False) + return model + + +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], model_dir='.'), strict=False) + return model + + +# def resnet101(num_classes, pretrained=False, **kwargs): +# """Constructs a ResNet-101 model. +# Args: +# pretrained (bool): If True, returns a model pre-trained on ImageNet +# """ +# model = ResNet(num_classes, Bottleneck, [3, 4, 23, 3], **kwargs) +# if pretrained: +# model.load_state_dict(model_zoo.load_url(model_urls['resnet101'], model_dir='.'), strict=False) +# return model +# +# +# def resnet152(num_classes, pretrained=False, **kwargs): +# """Constructs a ResNet-152 model. 
+# Args: +# pretrained (bool): If True, returns a model pre-trained on ImageNet +# """ +# model = ResNet(num_classes, Bottleneck, [3, 8, 36, 3], **kwargs) +# if pretrained: +# model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir='.'), strict=False) +# return model + + +#### ORJ MODELS ###### +class ResidualBlock(nn.Module): + def __init__(self, in_features): + super(ResidualBlock, self).__init__() + + conv_block = [ nn.ReflectionPad2d(1), + nn.Conv2d(in_features, in_features, 3), + nn.InstanceNorm2d(in_features), + nn.ReLU(inplace=True), + nn.ReflectionPad2d(1), + nn.Conv2d(in_features, in_features, 3), + nn.InstanceNorm2d(in_features) ] + + self.conv_block = nn.Sequential(*conv_block) + + def forward(self, x): + return x + self.conv_block(x) + +class Generator(nn.Module): + def __init__(self, input_nc, output_nc, n_residual_blocks=9): + super(Generator, self).__init__() + + # Initial convolution block + model = [ nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, 64, 7), + nn.InstanceNorm2d(64), + nn.ReLU(inplace=True) ] + + # Downsampling + in_features = 64 + out_features = in_features*2 + for _ in range(2): + model += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1), + nn.InstanceNorm2d(out_features), + nn.ReLU(inplace=True) ] + in_features = out_features + out_features = in_features*2 + + # Residual blocks + for _ in range(n_residual_blocks): + model += [ResidualBlock(in_features)] + + # Upsampling + out_features = in_features//2 + for _ in range(2): + model += [ nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1), + nn.InstanceNorm2d(out_features), + nn.ReLU(inplace=True) ] + in_features = out_features + out_features = in_features//2 + + # Output layer + model += [ nn.ReflectionPad2d(3), + nn.Conv2d(64, output_nc, 7), + nn.Tanh() ] + + self.model = nn.Sequential(*model) + + def forward(self, x): + return self.model(x) + +class Discriminator(nn.Module): + def __init__(self, input_nc): + super(Discriminator, self).__init__() + + # A bunch of convolutions one after another + model = [ nn.Conv2d(input_nc, 64, 4, stride=2, padding=1), + nn.LeakyReLU(0.2, inplace=True) ] + + model += [ nn.Conv2d(64, 128, 4, stride=2, padding=1), + nn.InstanceNorm2d(128), + nn.LeakyReLU(0.2, inplace=True) ] + + model += [ nn.Conv2d(128, 256, 4, stride=2, padding=1), + nn.InstanceNorm2d(256), + nn.LeakyReLU(0.2, inplace=True) ] + + model += [ nn.Conv2d(256, 512, 4, padding=1), + nn.InstanceNorm2d(512), + nn.LeakyReLU(0.2, inplace=True) ] + + # FCN classification layer + model += [nn.Conv2d(512, 1, 4, padding=1)] + + self.model = nn.Sequential(*model) + + def forward(self, x): + x = self.model(x) + # Average pooling and flatten + return F.avg_pool2d(x, x.size()[2:]).view(x.size()[0], -1) diff --git a/models/pix2pix_model.py b/models/pix2pix_model.py new file mode 100755 index 00000000..466de4fe --- /dev/null +++ b/models/pix2pix_model.py @@ -0,0 +1,111 @@ +import torch +from util.image_pool import ImagePool +from .base_model import BaseModel +from . 
import networks + + +class Pix2PixModel(BaseModel): + def name(self): + return 'Pix2PixModel' + + @staticmethod + def modify_commandline_options(parser, is_train=True): + + # changing the default values to match the pix2pix paper + # (https://phillipi.github.io/pix2pix/) + parser.set_defaults(pool_size=0, no_lsgan=True, norm='batch') + parser.set_defaults(dataset_mode='aligned') + parser.set_defaults(netG='unet_256') + if is_train: + parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss') + + return parser + + def initialize(self, opt): + BaseModel.initialize(self, opt) + self.isTrain = opt.isTrain + # specify the training losses you want to print out. The program will call base_model.get_current_losses + self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake'] + # specify the images you want to save/display. The program will call base_model.get_current_visuals + self.visual_names = ['real_A', 'fake_B', 'real_B'] + # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks + if self.isTrain: + self.model_names = ['G', 'D'] + else: # during test time, only load Gs + self.model_names = ['G'] + # load/define networks + self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, + not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) + + if self.isTrain: + use_sigmoid = opt.no_lsgan + self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD, + opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids) + + if self.isTrain: + self.fake_AB_pool = ImagePool(opt.pool_size) + # define loss functions + self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device) + self.criterionL1 = torch.nn.L1Loss() + + # initialize optimizers + self.optimizers = [] + self.optimizer_G = torch.optim.Adam(self.netG.parameters(), + lr=opt.lr, betas=(opt.beta1, 0.999)) + self.optimizer_D = torch.optim.Adam(self.netD.parameters(), + lr=opt.lr, betas=(opt.beta1, 0.999)) + self.optimizers.append(self.optimizer_G) + self.optimizers.append(self.optimizer_D) + + def set_input(self, input): + AtoB = self.opt.direction == 'AtoB' + self.real_A = input['A' if AtoB else 'B'].to(self.device) + self.real_B = input['B' if AtoB else 'A'].to(self.device) + self.image_paths = input['A_paths' if AtoB else 'B_paths'] + + def forward(self): + self.fake_B = self.netG(self.real_A) + + def backward_D(self): + # Fake + # stop backprop to the generator by detaching fake_B + fake_AB = self.fake_AB_pool.query(torch.cat((self.real_A, self.fake_B), 1)) + pred_fake = self.netD(fake_AB.detach()) + self.loss_D_fake = self.criterionGAN(pred_fake, False) + + # Real + real_AB = torch.cat((self.real_A, self.real_B), 1) + pred_real = self.netD(real_AB) + self.loss_D_real = self.criterionGAN(pred_real, True) + + # Combined loss + self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 + + self.loss_D.backward() + + def backward_G(self): + # First, G(A) should fake the discriminator + fake_AB = torch.cat((self.real_A, self.fake_B), 1) + pred_fake = self.netD(fake_AB) + self.loss_G_GAN = self.criterionGAN(pred_fake, True) + + # Second, G(A) = B + self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1 + + self.loss_G = self.loss_G_GAN + self.loss_G_L1 + + self.loss_G.backward() + + def optimize_parameters(self): + self.forward() + # update D + self.set_requires_grad(self.netD, True) + self.optimizer_D.zero_grad() + self.backward_D() 
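        # The step below only updates netD: backward_D() evaluated its fake-side loss on
        # fake_AB.detach(), so no gradient from this pass reaches the generator. The later
        # set_requires_grad(self.netD, False) before the generator update is an efficiency
        # measure rather than a correctness requirement, since only optimizer_G.step() is
        # called for that half of the iteration anyway.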
+ self.optimizer_D.step() + + # update G + self.set_requires_grad(self.netD, False) + self.optimizer_G.zero_grad() + self.backward_G() + self.optimizer_G.step() diff --git a/models/test_model.py b/models/test_model.py new file mode 100755 index 00000000..4b4de4e0 --- /dev/null +++ b/models/test_model.py @@ -0,0 +1,46 @@ +from .base_model import BaseModel +from . import networks +from .cycle_gan_model import CycleGANModel + + +class TestModel(BaseModel): + def name(self): + return 'TestModel' + + @staticmethod + def modify_commandline_options(parser, is_train=True): + assert not is_train, 'TestModel cannot be used in train mode' + parser = CycleGANModel.modify_commandline_options(parser, is_train=False) + parser.set_defaults(dataset_mode='single') + + parser.add_argument('--model_suffix', type=str, default='', + help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will' + ' be loaded as the generator of TestModel') + + return parser + + def initialize(self, opt): + assert(not opt.isTrain) + BaseModel.initialize(self, opt) + + # specify the training losses you want to print out. The program will call base_model.get_current_losses + self.loss_names = [] + # specify the images you want to save/display. The program will call base_model.get_current_visuals + self.visual_names = ['real_A', 'fake_B'] + # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks + self.model_names = ['G' + opt.model_suffix] + + self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, + opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) + + # assigns the model to self.netG_[suffix] so that it can be loaded + # please see BaseModel.load_networks + setattr(self, 'netG' + opt.model_suffix, self.netG) + + def set_input(self, input): + # we need to use single_dataset mode + self.real_A = input['A'].to(self.device) + self.image_paths = input['A_paths'] + + def forward(self): + self.fake_B = self.netG(self.real_A) diff --git a/options/__init__.py b/options/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/options/__init__.pyc b/options/__init__.pyc new file mode 100755 index 00000000..b2242abb Binary files /dev/null and b/options/__init__.pyc differ diff --git a/options/base_options.py b/options/base_options.py new file mode 100755 index 00000000..99c7d364 --- /dev/null +++ b/options/base_options.py @@ -0,0 +1,115 @@ +import argparse +import os +from util import util +import torch +import models +import data + + +class BaseOptions(): + def __init__(self): + self.initialized = False + + def initialize(self, parser): + parser.add_argument('--dataroot', required=False, default="./dataset/hayao", help='path to images (should have subfolders trainA, trainB, valA, valB, etc)') + parser.add_argument('--name', type=str, default='dadsda', help='name of the experiment. 
It decides where to store samples and models') + parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') + parser.add_argument('--batch_size', type=int, default=1, help='input batch size') + parser.add_argument('--loadSize', type=int, default=286, help='scale images to this size') + parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size') + parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML') + parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels') + parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels') + parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer') + parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') + parser.add_argument('--netD', type=str, default='basic', help='selects model to use for netD') + parser.add_argument('--netG', type=str, default='resnet_fpn', help='selects model to use for netG') + parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') + parser.add_argument('--gpu_ids', type=str, default='-1', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') + parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single]') + parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. cycle_gan, pix2pix, test') + parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA') + parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') + parser.add_argument('--num_threads', default=8, type=int, help='# threads for loading data') + parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization') + parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') + parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') + parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') + parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop|none]') + parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') + parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') + parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') + parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') + parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') + self.initialized = True + return parser + + def gather_options(self): + # initialize parser with basic options + if not self.initialized: + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = self.initialize(parser) + + # get the basic options + opt, _ = parser.parse_known_args() + + # modify model-related parser options + model_name = opt.model + model_option_setter = models.get_option_setter(model_name) + parser = model_option_setter(parser, self.isTrain) + opt, _ = parser.parse_known_args() # parse again with the new defaults + + # modify dataset-related parser options + dataset_name = opt.dataset_mode + dataset_option_setter = data.get_option_setter(dataset_name) + parser = dataset_option_setter(parser, self.isTrain) + + self.parser = parser + + return parser.parse_args() + + def print_options(self, opt): + message = '' + message += '----------------- Options ---------------\n' + for k, v in sorted(vars(opt).items()): + comment = '' + default = self.parser.get_default(k) + if v != default: + comment = '\t[default: %s]' % str(default) + message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) + message += '----------------- End -------------------' + print(message) + + # save to the disk + expr_dir = os.path.join(opt.checkpoints_dir, opt.name) + util.mkdirs(expr_dir) + file_name = os.path.join(expr_dir, 'opt.txt') + with open(file_name, 'wt') as opt_file: + opt_file.write(message) + opt_file.write('\n') + + def parse(self): + + opt = self.gather_options() + opt.isTrain = self.isTrain # train or test + + # process opt.suffix + if opt.suffix: + suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else '' + opt.name = opt.name + suffix + + self.print_options(opt) + + # set gpu ids + str_ids = opt.gpu_ids.split(',') + opt.gpu_ids = [] + for str_id in str_ids: + id = int(str_id) + if id >= 0: + opt.gpu_ids.append(id) + if len(opt.gpu_ids) > 0: + torch.cuda.set_device(opt.gpu_ids[0]) + + self.opt = opt + return self.opt diff --git a/options/base_options.pyc b/options/base_options.pyc new file mode 100755 index 00000000..1b3fd54b Binary files /dev/null and b/options/base_options.pyc differ diff --git a/options/test_options.py b/options/test_options.py new file mode 100755 index 00000000..731596a1 --- /dev/null +++ b/options/test_options.py @@ -0,0 +1,19 @@ +from .base_options import BaseOptions + + +class TestOptions(BaseOptions): + def initialize(self, parser): + parser = BaseOptions.initialize(self, parser) + parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.') + 
parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') + parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images') + parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') + # Dropout and Batchnorm has different behavioir during training and test. + parser.add_argument('--eval', action='store_true', help='use eval mode during test time.') + parser.add_argument('--num_test', type=int, default=50, help='how many test images to run') + + parser.set_defaults(model='test') + # To avoid cropping, the loadSize should be the same as fineSize + parser.set_defaults(loadSize=parser.get_default('fineSize')) + self.isTrain = False + return parser diff --git a/options/train_options.py b/options/train_options.py new file mode 100755 index 00000000..3dd1e149 --- /dev/null +++ b/options/train_options.py @@ -0,0 +1,31 @@ +from .base_options import BaseOptions + + +class TrainOptions(BaseOptions): + def initialize(self, parser): + parser = BaseOptions.initialize(self, parser) + parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen') + parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.') + parser.add_argument('--display_id', type=int, default=1, help='window id of the web display') + parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display') + parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")') + parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display') + parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html') + parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') + parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') + parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs') + parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') + parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by , +, ...') + parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') + parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate') + parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') + parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') + parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') + parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN') + parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images') + parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') + parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: 
lambda|step|plateau|cosine') + parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') + + self.isTrain = True + return parser diff --git a/options/train_options.pyc b/options/train_options.pyc new file mode 100755 index 00000000..4c833a42 Binary files /dev/null and b/options/train_options.pyc differ diff --git a/requirements.txt b/requirements.txt new file mode 100755 index 00000000..072d027a --- /dev/null +++ b/requirements.txt @@ -0,0 +1,4 @@ +torch>=0.4.0 +torchvision>=0.2.1 +dominate>=2.3.1 +visdom>=0.1.8.3 diff --git a/scripts/conda_deps.sh b/scripts/conda_deps.sh new file mode 100755 index 00000000..72df436f --- /dev/null +++ b/scripts/conda_deps.sh @@ -0,0 +1,4 @@ +set -ex +conda install numpy pyyaml mkl mkl-include setuptools cmake cffi typing +conda install pytorch torchvision -c pytorch # add cuda90 if CUDA 9 +conda install visdom dominate -c conda-forge # install visdom and dominate diff --git a/scripts/download_cyclegan_model.sh b/scripts/download_cyclegan_model.sh new file mode 100755 index 00000000..26e198a4 --- /dev/null +++ b/scripts/download_cyclegan_model.sh @@ -0,0 +1,11 @@ +FILE=$1 + +echo "Note: available models are apple2orange, orange2apple, summer2winter_yosemite, winter2summer_yosemite, horse2zebra, zebra2horse, monet2photo, style_monet, style_cezanne, style_ukiyoe, style_vangogh, sat2map, map2sat, cityscapes_photo2label, cityscapes_label2photo, facades_photo2label, facades_label2photo, iphone2dslr_flower" + +echo "Specified [$FILE]" + +mkdir -p ./checkpoints/${FILE}_pretrained +MODEL_FILE=./checkpoints/${FILE}_pretrained/latest_net_G.pth +URL=http://efrosgans.eecs.berkeley.edu/cyclegan/pretrained_models/$FILE.pth + +wget -N $URL -O $MODEL_FILE diff --git a/scripts/download_pix2pix_model.sh b/scripts/download_pix2pix_model.sh new file mode 100755 index 00000000..623a4084 --- /dev/null +++ b/scripts/download_pix2pix_model.sh @@ -0,0 +1,11 @@ +FILE=$1 + +echo "Note: available models are edges2shoes, sat2map, facades_label2photo, and day2night" + +echo "Specified [$FILE]" + +mkdir -p ./checkpoints/${FILE}_pretrained +MODEL_FILE=./checkpoints/${FILE}_pretrained/latest_net_G.pth +URL=http://efrosgans.eecs.berkeley.edu/pix2pix/models-pytorch/$FILE.pth + +wget -N $URL -O $MODEL_FILE diff --git a/scripts/install_deps.sh b/scripts/install_deps.sh new file mode 100755 index 00000000..801c7dde --- /dev/null +++ b/scripts/install_deps.sh @@ -0,0 +1,3 @@ +set -ex +pip install visdom +pip install dominate diff --git a/scripts/test_before_push.py b/scripts/test_before_push.py new file mode 100755 index 00000000..0e4d7874 --- /dev/null +++ b/scripts/test_before_push.py @@ -0,0 +1,39 @@ +# Simple script to make sure basic usage +# such as training, testing, saving and loading +# runs without errors. 
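# How the option defaults above interact (a sketch of the intended behaviour; the authoritative
# logic is BaseOptions.gather_options): BaseOptions sets repo-wide defaults such as
# netG='resnet_fpn' and norm='instance'; the chosen model's modify_commandline_options may
# override them (Pix2PixModel switches to netG='unet_256', norm='batch', dataset_mode='aligned');
# and explicit command-line flags win over both, because set_defaults only changes defaults.
# For example (hypothetical invocations):
#
#     python train.py --model pix2pix                    # unet_256 generator, batch norm
#     python train.py --model pix2pix --norm instance    # explicit flag beats the model default
#
# The smoke-test commands in this script rely on that cascade.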
+import os + + +def run(command): + print(command) + exit_status = os.system(command) + if exit_status > 0: + exit(1) + + +if __name__ == '__main__': + if not os.path.exists('./datasets/mini'): + run('bash ./datasets/download_cyclegan_dataset.sh mini') + + if not os.path.exists('./datasets/mini_pix2pix'): + run('bash ./datasets/download_cyclegan_dataset.sh mini_pix2pix') + + # pretrained cyclegan model + if not os.path.exists('./checkpoints/horse2zebra_pretrained/latest_net_G.pth'): + run('bash ./scripts/download_cyclegan_model.sh horse2zebra') + run('python test.py --model test --dataroot ./datasets/mini --name horse2zebra_pretrained --no_dropout --num_test 1') + + # pretrained pix2pix model + if not os.path.exists('./checkpoints/facades_label2photo_pretrained/latest_net_G.pth'): + run('bash ./scripts/download_pix2pix_model.sh facades_label2photo') + if not os.path.exists('./datasets/facades'): + run('bash ./datasets/download_pix2pix_dataset.sh facades') + run('python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained --num_test 1') + + # cyclegan train/test + run('python train.py --name temp --dataroot ./datasets/mini --niter 1 --niter_decay 0 --save_latest_freq 10 --print_freq 1 --display_id -1') + run('python test.py --name temp --dataroot ./datasets/mini --num_test 1 --model_suffix "_A"') + + # pix2pix train/test + run('python train.py --model pix2pix --name temp --dataroot ./datasets/mini_pix2pix --niter 1 --niter_decay 0 --save_latest_freq 10 --display_id -1') + run('python test.py --model pix2pix --name temp --dataroot ./datasets/mini_pix2pix --num_test 1 --direction BtoA') diff --git a/scripts/test_cyclegan.sh b/scripts/test_cyclegan.sh new file mode 100755 index 00000000..9036bf86 --- /dev/null +++ b/scripts/test_cyclegan.sh @@ -0,0 +1,2 @@ +set -ex +python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan --phase test --no_dropout diff --git a/scripts/test_pix2pix.sh b/scripts/test_pix2pix.sh new file mode 100755 index 00000000..589599b4 --- /dev/null +++ b/scripts/test_pix2pix.sh @@ -0,0 +1,2 @@ +set -ex +python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --netG unet_256 --direction BtoA --dataset_mode aligned --norm batch diff --git a/scripts/test_single.sh b/scripts/test_single.sh new file mode 100755 index 00000000..eada6402 --- /dev/null +++ b/scripts/test_single.sh @@ -0,0 +1,2 @@ +set -ex +python test.py --dataroot ./datasets/facades/testB/ --name facades_pix2pix --model test --netG unet_256 --direction BtoA --dataset_mode single --norm batch diff --git a/scripts/train_cyclegan.sh b/scripts/train_cyclegan.sh new file mode 100755 index 00000000..567721e9 --- /dev/null +++ b/scripts/train_cyclegan.sh @@ -0,0 +1,2 @@ +set -ex +python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan --pool_size 50 --no_dropout diff --git a/scripts/train_pix2pix.sh b/scripts/train_pix2pix.sh new file mode 100755 index 00000000..6247cfbf --- /dev/null +++ b/scripts/train_pix2pix.sh @@ -0,0 +1,2 @@ +set -ex +python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --netG unet_256 --direction BtoA --lambda_L1 100 --dataset_mode aligned --no_lsgan --norm batch --pool_size 0 diff --git a/test.py b/test.py new file mode 100755 index 00000000..2c059241 --- /dev/null +++ b/test.py @@ -0,0 +1,40 @@ +import os +from options.test_options import TestOptions +from data import CreateDataLoader +from models import create_model +from 
util.visualizer import save_images +from util import html + + +if __name__ == '__main__': + opt = TestOptions().parse() + # hard-code some parameters for test + opt.num_threads = 1 # test code only supports num_threads = 1 + opt.batch_size = 1 # test code only supports batch_size = 1 + opt.serial_batches = True # no shuffle + opt.no_flip = True # no flip + opt.display_id = -1 # no visdom display + data_loader = CreateDataLoader(opt) + dataset = data_loader.load_data() + model = create_model(opt) + model.setup(opt) + # create a website + web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.epoch)) + webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch)) + # test with eval mode. This only affects layers like batchnorm and dropout. + # pix2pix: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode. + # CycleGAN: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout. + if opt.eval: + model.eval() + for i, data in enumerate(dataset): + if i >= opt.num_test: + break + model.set_input(data) + model.test() + visuals = model.get_current_visuals() + img_path = model.get_image_paths() + if i % 5 == 0: + print('processing (%04d)-th image... %s' % (i, img_path)) + save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize) + # save the website + webpage.save() diff --git a/train.py b/train.py new file mode 100755 index 00000000..c65f5d93 --- /dev/null +++ b/train.py @@ -0,0 +1,59 @@ +import time +from options.train_options import TrainOptions +from data import CreateDataLoader +from models import create_model +from util.visualizer import Visualizer + +if __name__ == '__main__': + opt = TrainOptions().parse() + data_loader = CreateDataLoader(opt) + dataset = data_loader.load_data() + dataset_size = len(data_loader) + print('#training images = %d' % dataset_size) + + model = create_model(opt) + model.setup(opt) + visualizer = Visualizer(opt) + total_steps = 0 + + for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1): + epoch_start_time = time.time() + iter_data_time = time.time() + epoch_iter = 0 + + for i, data in enumerate(dataset): + iter_start_time = time.time() + if total_steps % opt.print_freq == 0: + t_data = iter_start_time - iter_data_time + visualizer.reset() + total_steps += opt.batch_size + epoch_iter += opt.batch_size + model.set_input(data) + model.optimize_parameters() + + if total_steps % opt.display_freq == 0: + save_result = total_steps % opt.update_html_freq == 0 + visualizer.display_current_results(model.get_current_visuals(), epoch, save_result) + + if total_steps % opt.print_freq == 0: + losses = model.get_current_losses() + t = (time.time() - iter_start_time) / opt.batch_size + visualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data) + if opt.display_id > 0: + visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, opt, losses) + + if total_steps % opt.save_latest_freq == 0: + print('saving the latest model (epoch %d, total_steps %d)' % + (epoch, total_steps)) + model.save_networks('latest') + + iter_data_time = time.time() + if epoch % opt.save_epoch_freq == 0: + print('saving the model at the end of epoch %d, iters %d' % + (epoch, total_steps)) + model.save_networks('latest') + model.save_networks(epoch) + + print('End of epoch %d / %d \t Time Taken: %d sec' % + (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time)) + 
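        # update_learning_rate() is expected to step the model's LR schedulers once per epoch.
        # With the defaults above (lr_policy='lambda', niter=100, niter_decay=100) the learning
        # rate should hold at opt.lr for the first niter epochs and then decay roughly linearly
        # to zero over the following niter_decay epochs; the exact rule lives in
        # networks.get_scheduler, outside this hunk.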
model.update_learning_rate() diff --git a/util/__init__.py b/util/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/util/__init__.pyc b/util/__init__.pyc new file mode 100755 index 00000000..9293b1a4 Binary files /dev/null and b/util/__init__.pyc differ diff --git a/util/get_data.py b/util/get_data.py new file mode 100755 index 00000000..6325605b --- /dev/null +++ b/util/get_data.py @@ -0,0 +1,115 @@ +from __future__ import print_function +import os +import tarfile +import requests +from warnings import warn +from zipfile import ZipFile +from bs4 import BeautifulSoup +from os.path import abspath, isdir, join, basename + + +class GetData(object): + """ + + Download CycleGAN or Pix2Pix Data. + + Args: + technique : str + One of: 'cyclegan' or 'pix2pix'. + verbose : bool + If True, print additional information. + + Examples: + >>> from util.get_data import GetData + >>> gd = GetData(technique='cyclegan') + >>> new_data_path = gd.get(save_path='./datasets') # options will be displayed. + + """ + + def __init__(self, technique='cyclegan', verbose=True): + url_dict = { + 'pix2pix': 'https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets', + 'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets' + } + self.url = url_dict.get(technique.lower()) + self._verbose = verbose + + def _print(self, text): + if self._verbose: + print(text) + + @staticmethod + def _get_options(r): + soup = BeautifulSoup(r.text, 'lxml') + options = [h.text for h in soup.find_all('a', href=True) + if h.text.endswith(('.zip', 'tar.gz'))] + return options + + def _present_options(self): + r = requests.get(self.url) + options = self._get_options(r) + print('Options:\n') + for i, o in enumerate(options): + print("{0}: {1}".format(i, o)) + choice = input("\nPlease enter the number of the " + "dataset above you wish to download:") + return options[int(choice)] + + def _download_data(self, dataset_url, save_path): + if not isdir(save_path): + os.makedirs(save_path) + + base = basename(dataset_url) + temp_save_path = join(save_path, base) + + with open(temp_save_path, "wb") as f: + r = requests.get(dataset_url) + f.write(r.content) + + if base.endswith('.tar.gz'): + obj = tarfile.open(temp_save_path) + elif base.endswith('.zip'): + obj = ZipFile(temp_save_path, 'r') + else: + raise ValueError("Unknown File Type: {0}.".format(base)) + + self._print("Unpacking Data...") + obj.extractall(save_path) + obj.close() + os.remove(temp_save_path) + + def get(self, save_path, dataset=None): + """ + + Download a dataset. + + Args: + save_path : str + A directory to save the data to. + dataset : str, optional + A specific dataset to download. + Note: this must include the file extension. + If None, options will be presented for you + to choose from. + + Returns: + save_path_full : str + The absolute path to the downloaded data. + + """ + if dataset is None: + selected_dataset = self._present_options() + else: + selected_dataset = dataset + + save_path_full = join(save_path, selected_dataset.split('.')[0]) + + if isdir(save_path_full): + warn("\n'{0}' already exists. 
Voiding Download.".format( + save_path_full)) + else: + self._print('Downloading Data...') + url = "{0}/{1}".format(self.url, selected_dataset) + self._download_data(url, save_path=save_path) + + return abspath(save_path_full) diff --git a/util/html.py b/util/html.py new file mode 100755 index 00000000..c7956f13 --- /dev/null +++ b/util/html.py @@ -0,0 +1,64 @@ +import dominate +from dominate.tags import * +import os + + +class HTML: + def __init__(self, web_dir, title, reflesh=0): + self.title = title + self.web_dir = web_dir + self.img_dir = os.path.join(self.web_dir, 'images') + if not os.path.exists(self.web_dir): + os.makedirs(self.web_dir) + if not os.path.exists(self.img_dir): + os.makedirs(self.img_dir) + # print(self.img_dir) + + self.doc = dominate.document(title=title) + if reflesh > 0: + with self.doc.head: + meta(http_equiv="reflesh", content=str(reflesh)) + + def get_image_dir(self): + return self.img_dir + + def add_header(self, str): + with self.doc: + h3(str) + + def add_table(self, border=1): + self.t = table(border=border, style="table-layout: fixed;") + self.doc.add(self.t) + + def add_images(self, ims, txts, links, width=400): + self.add_table() + with self.t: + with tr(): + for im, txt, link in zip(ims, txts, links): + with td(style="word-wrap: break-word;", halign="center", valign="top"): + with p(): + with a(href=os.path.join('images', link)): + img(style="width:%dpx" % width, src=os.path.join('images', im)) + br() + p(txt) + + def save(self): + html_file = '%s/index.html' % self.web_dir + f = open(html_file, 'wt') + f.write(self.doc.render()) + f.close() + + +if __name__ == '__main__': + html = HTML('web/', 'test_html') + html.add_header('hello world') + + ims = [] + txts = [] + links = [] + for n in range(4): + ims.append('image_%d.png' % n) + txts.append('text_%d' % n) + links.append('image_%d.png' % n) + html.add_images(ims, txts, links) + html.save() diff --git a/util/html.pyc b/util/html.pyc new file mode 100755 index 00000000..55262ef2 Binary files /dev/null and b/util/html.pyc differ diff --git a/util/image_pool.py b/util/image_pool.py new file mode 100755 index 00000000..52413e0f --- /dev/null +++ b/util/image_pool.py @@ -0,0 +1,32 @@ +import random +import torch + + +class ImagePool(): + def __init__(self, pool_size): + self.pool_size = pool_size + if self.pool_size > 0: + self.num_imgs = 0 + self.images = [] + + def query(self, images): + if self.pool_size == 0: + return images + return_images = [] + for image in images: + image = torch.unsqueeze(image.data, 0) + if self.num_imgs < self.pool_size: + self.num_imgs = self.num_imgs + 1 + self.images.append(image) + return_images.append(image) + else: + p = random.uniform(0, 1) + if p > 0.5: + random_id = random.randint(0, self.pool_size - 1) # randint is inclusive + tmp = self.images[random_id].clone() + self.images[random_id] = image + return_images.append(tmp) + else: + return_images.append(image) + return_images = torch.cat(return_images, 0) + return return_images diff --git a/util/image_pool.pyc b/util/image_pool.pyc new file mode 100755 index 00000000..ceb0e7bb Binary files /dev/null and b/util/image_pool.pyc differ diff --git a/util/util.py b/util/util.py new file mode 100755 index 00000000..ba7b083c --- /dev/null +++ b/util/util.py @@ -0,0 +1,60 @@ +from __future__ import print_function +import torch +import numpy as np +from PIL import Image +import os + + +# Converts a Tensor into an image array (numpy) +# |imtype|: the desired type of the converted numpy array +def tensor2im(input_image, 
imtype=np.uint8): + if isinstance(input_image, torch.Tensor): + image_tensor = input_image.data + else: + return input_image + image_numpy = image_tensor[0].cpu().float().numpy() + if image_numpy.shape[0] == 1: + image_numpy = np.tile(image_numpy, (3, 1, 1)) + image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 + return image_numpy.astype(imtype) + + +def diagnose_network(net, name='network'): + mean = 0.0 + count = 0 + for param in net.parameters(): + if param.grad is not None: + mean += torch.mean(torch.abs(param.grad.data)) + count += 1 + if count > 0: + mean = mean / count + print(name) + print(mean) + + +def save_image(image_numpy, image_path): + image_pil = Image.fromarray(image_numpy) + image_pil.save(image_path) + + +def print_numpy(x, val=True, shp=False): + x = x.astype(np.float64) + if shp: + print('shape,', x.shape) + if val: + x = x.flatten() + print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( + np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) + + +def mkdirs(paths): + if isinstance(paths, list) and not isinstance(paths, str): + for path in paths: + mkdir(path) + else: + mkdir(paths) + + +def mkdir(path): + if not os.path.exists(path): + os.makedirs(path) diff --git a/util/util.pyc b/util/util.pyc new file mode 100755 index 00000000..c25eb8b1 Binary files /dev/null and b/util/util.pyc differ diff --git a/util/visualizer.py b/util/visualizer.py new file mode 100755 index 00000000..20816a1b --- /dev/null +++ b/util/visualizer.py @@ -0,0 +1,169 @@ +import numpy as np +import os +import sys +import ntpath +import time +from . import util +from . import html +from scipy.misc import imresize + +if sys.version_info[0] == 2: + VisdomExceptionBase = Exception +else: + VisdomExceptionBase = ConnectionError + + +# save image to the disk +def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256): + image_dir = webpage.get_image_dir() + short_path = ntpath.basename(image_path[0]) + name = os.path.splitext(short_path)[0] + + webpage.add_header(name) + ims, txts, links = [], [], [] + + for label, im_data in visuals.items(): + im = util.tensor2im(im_data) + image_name = '%s_%s.png' % (name, label) + save_path = os.path.join(image_dir, image_name) + h, w, _ = im.shape + if aspect_ratio > 1.0: + im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic') + if aspect_ratio < 1.0: + im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic') + util.save_image(im, save_path) + + ims.append(image_name) + txts.append(label) + links.append(image_name) + webpage.add_images(ims, txts, links, width=width) + + +class Visualizer(): + def __init__(self, opt): + self.display_id = opt.display_id + self.use_html = opt.isTrain and not opt.no_html + self.win_size = opt.display_winsize + self.name = opt.name + self.opt = opt + self.saved = False + if self.display_id > 0: + import visdom + self.ncols = opt.display_ncols + self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env, raise_exceptions=True) + + if self.use_html: + self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web') + self.img_dir = os.path.join(self.web_dir, 'images') + print('create web directory %s...' 
% self.web_dir) + util.mkdirs([self.web_dir, self.img_dir]) + self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt') + with open(self.log_name, "a") as log_file: + now = time.strftime("%c") + log_file.write('================ Training Loss (%s) ================\n' % now) + + def reset(self): + self.saved = False + + def throw_visdom_connection_error(self): + print('\n\nCould not connect to Visdom server (https://github.com/facebookresearch/visdom) for displaying training progress.\nYou can suppress connection to Visdom using the option --display_id -1. To install visdom, run \n$ pip install visdom\n, and start the server by \n$ python -m visdom.server.\n\n') + exit(1) + + # |visuals|: dictionary of images to display or save + def display_current_results(self, visuals, epoch, save_result): + if self.display_id > 0: # show images in the browser + ncols = self.ncols + if ncols > 0: + ncols = min(ncols, len(visuals)) + h, w = next(iter(visuals.values())).shape[:2] + table_css = """""" % (w, h) + title = self.name + label_html = '' + label_html_row = '' + images = [] + idx = 0 + for label, image in visuals.items(): + image_numpy = util.tensor2im(image) + label_html_row += '%s' % label + images.append(image_numpy.transpose([2, 0, 1])) + idx += 1 + if idx % ncols == 0: + label_html += '%s' % label_html_row + label_html_row = '' + white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255 + while idx % ncols != 0: + images.append(white_image) + label_html_row += '' + idx += 1 + if label_html_row != '': + label_html += '%s' % label_html_row + # pane col = image row + try: + self.vis.images(images, nrow=ncols, win=self.display_id + 1, + padding=2, opts=dict(title=title + ' images')) + label_html = '%s
' % label_html + self.vis.text(table_css + label_html, win=self.display_id + 2, + opts=dict(title=title + ' labels')) + except VisdomExceptionBase: + self.throw_visdom_connection_error() + + else: + idx = 1 + for label, image in visuals.items(): + image_numpy = util.tensor2im(image) + self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label), + win=self.display_id + idx) + idx += 1 + + if self.use_html and (save_result or not self.saved): # save images to a html file + self.saved = True + for label, image in visuals.items(): + image_numpy = util.tensor2im(image) + img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label)) + util.save_image(image_numpy, img_path) + # update website + webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1) + for n in range(epoch, 0, -1): + webpage.add_header('epoch [%d]' % n) + ims, txts, links = [], [], [] + + for label, image_numpy in visuals.items(): + image_numpy = util.tensor2im(image) + img_path = 'epoch%.3d_%s.png' % (n, label) + ims.append(img_path) + txts.append(label) + links.append(img_path) + webpage.add_images(ims, txts, links, width=self.win_size) + webpage.save() + + # losses: dictionary of error labels and values + def plot_current_losses(self, epoch, counter_ratio, opt, losses): + if not hasattr(self, 'plot_data'): + self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())} + self.plot_data['X'].append(epoch + counter_ratio) + self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']]) + try: + self.vis.line( + X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1), + Y=np.array(self.plot_data['Y']), + opts={ + 'title': self.name + ' loss over time', + 'legend': self.plot_data['legend'], + 'xlabel': 'epoch', + 'ylabel': 'loss'}, + win=self.display_id) + except VisdomExceptionBase: + self.throw_visdom_connection_error() + + # losses: same format as |losses| of plot_current_losses + def print_current_losses(self, epoch, i, losses, t, t_data): + message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, i, t, t_data) + for k, v in losses.items(): + message += '%s: %.3f ' % (k, v) + + print(message) + with open(self.log_name, "a") as log_file: + log_file.write('%s\n' % message) diff --git a/util/visualizer.pyc b/util/visualizer.pyc new file mode 100755 index 00000000..e944ff5e Binary files /dev/null and b/util/visualizer.pyc differ
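The SAM generator's PyramidFeatures module in models/networks.py above follows the usual FPN top-down pathway: each backbone stage C2..C5 is projected to a common width by a 1x1 lateral convolution, the coarser map is upsampled by 2 and added to the next finer lateral, and only the finest merged map P2 is carried on towards the image decoder. The sketch below (a made-up TinyFPN with hypothetical channel sizes 64/128/128/256 and a 128-wide feature space, matching what the BasicBlock branch of fpn_sizes would produce) is a reading aid only; it deliberately omits the extra upsample, reflection padding and 3x3 convolution that the repository applies to P2, and it is not a drop-in replacement for the class above.

import torch
import torch.nn as nn


class TinyFPN(nn.Module):
    """Top-down FPN merge only: lateral 1x1 convolutions plus nearest-neighbour upsampling."""

    def __init__(self, c2, c3, c4, c5, feature_size=128):
        super(TinyFPN, self).__init__()
        self.lat2 = nn.Conv2d(c2, feature_size, kernel_size=1)
        self.lat3 = nn.Conv2d(c3, feature_size, kernel_size=1)
        self.lat4 = nn.Conv2d(c4, feature_size, kernel_size=1)
        self.lat5 = nn.Conv2d(c5, feature_size, kernel_size=1)
        self.up = nn.Upsample(scale_factor=2, mode='nearest')

    def forward(self, C2, C3, C4, C5):
        P5 = self.lat5(C5)
        P4 = self.lat4(C4) + self.up(P5)  # inject coarse context into the finer level
        P3 = self.lat3(C3) + self.up(P4)
        P2 = self.lat2(C2) + self.up(P3)
        return P2


if __name__ == '__main__':
    # hypothetical stage outputs for a 256x256 input after the stride-2 stem in ResNet.forward
    C2 = torch.randn(1, 64, 128, 128)
    C3 = torch.randn(1, 128, 64, 64)
    C4 = torch.randn(1, 128, 32, 32)
    C5 = torch.randn(1, 256, 16, 16)
    fpn = TinyFPN(64, 128, 128, 256)
    print(fpn(C2, C3, C4, C5).shape)  # torch.Size([1, 128, 128, 128])

Using addition rather than concatenation at every merge keeps the channel count fixed, which is why a single feature_size parameter is enough for all four lateral convolutions; the repository's BasicBlock_sam blocks, by contrast, concatenate with their shortcut and then squeeze the channels back down through final_conv.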