"""VQGAN Loss
 - Adapted from https://github.com/CompVis/taming-transformers
"""
import torch
import torch.nn as nn
import torch.nn.functional as F

from .discriminator import NLayerDiscriminator, weights_init
from .blocks import LossCriterion, LossCriterionMask


class DummyLoss(nn.Module):
    """Placeholder criterion with no parameters and no loss computation."""
    def __init__(self):
        super().__init__()


def adopt_weight(weight, global_step, threshold=0, value=0.):
    """Return `value` until `global_step` reaches `threshold`, then `weight`."""
    if global_step < threshold:
        weight = value
    return weight
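
# Example: with threshold=10000 the weight stays at `value` (0.0 by default)
# for the first 10000 steps and switches to `weight` afterwards:
#   adopt_weight(1.0, global_step=5000,  threshold=10000)  # -> 0.0
#   adopt_weight(1.0, global_step=20000, threshold=10000)  # -> 1.0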


def hinge_d_loss(logits_real, logits_fake):
    loss_real = torch.mean(F.relu(1. - logits_real))
    loss_fake = torch.mean(F.relu(1. + logits_fake))
    d_loss = 0.5 * (loss_real + loss_fake)
    return d_loss


def vanilla_d_loss(logits_real, logits_fake):
    d_loss = 0.5 * (
        torch.mean(F.softplus(-logits_real)) +
        torch.mean(F.softplus(logits_fake)))
    return d_loss
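
# Both discriminator objectives push real logits up and fake logits down; with
# well-separated logits (real = +2, fake = -2) the hinge loss is exactly 0,
# while the vanilla loss approaches softplus(-2) ≈ 0.127.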

def fft_loss(pred, tgt):
    """Mean absolute error between the 2D Fourier spectra of pred and tgt."""
    return (torch.fft.fftn(pred, dim=(-2, -1))
            - torch.fft.fftn(tgt, dim=(-2, -1))).abs().mean()
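
# The spectral loss compares complex FFT coefficients bin by bin; a minimal
# illustration (shapes are arbitrary):
#   pred, tgt = torch.randn(2, 3, 64, 64), torch.randn(2, 3, 64, 64)
#   fft_loss(pred, tgt)  # mean |FFT2(pred) - FFT2(tgt)| over all bins
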

class LPIPSWithDiscriminator(nn.Module):
    """Reconstruction loss (pixel/spectral plus a VGG-based perceptual term)
    combined with a patch discriminator, following taming-transformers."""

    def __init__(self, disc_start, model_path, pixelloss_weight=1.0,
                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=0.8,
                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
                 disc_ndf=64, disc_loss="hinge", rec_loss="FFT",
                 style_layers=[], content_layers=['r41']):
        super().__init__()
        assert disc_loss in ["hinge", "vanilla"]
        self.pixel_weight = pixelloss_weight
        self.perceptual_loss = LossCriterion(style_layers, content_layers,
                                             0, perceptual_weight,
                                             model_path=model_path)

        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
                                                 n_layers=disc_num_layers,
                                                 use_actnorm=use_actnorm,
                                                 ndf=disc_ndf
                                                 ).apply(weights_init)
        self.discriminator_iter_start = disc_start
        if disc_loss == "hinge":
            self.disc_loss = hinge_d_loss
        elif disc_loss == "vanilla":
            self.disc_loss = vanilla_d_loss
        else:
            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
        print(f"VQLPIPSWithDiscriminator running with {disc_loss} and {rec_loss} loss.")
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional

        self.rec_loss = rec_loss
        self.perceptual_weight = perceptual_weight

    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        if last_layer is not None:
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]

        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
        d_weight = d_weight * self.discriminator_weight
        return d_weight
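
    # Following taming-transformers, the adaptive weight above balances the
    # reconstruction and adversarial gradients at the decoder's last layer,
    #     d_weight = ||grad(nll_loss)|| / (||grad(g_loss)|| + 1e-4) * disc_weight,
    # so neither objective dominates the generator update.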

    def forward(self, inputs, reconstructions, optimizer_idx,
                global_step, last_layer=None, cond=None, split="train"):
        """Return (loss, log): the generator/autoencoder loss when
        optimizer_idx == 0, the discriminator loss when optimizer_idx == 1."""
        if self.rec_loss == "L1":
            rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()).mean()
        elif self.rec_loss == "MSE":
            rec_loss = F.mse_loss(reconstructions, inputs)
        elif self.rec_loss == "FFT":
            rec_loss = fft_loss(inputs, reconstructions)
        elif self.rec_loss is None:
            rec_loss = 0
        else:
            raise ValueError("Unkown reconstruction loss, choices are [FFT, L1]")

        if self.perceptual_weight > 0:
            loss_dict = self.perceptual_loss(reconstructions, inputs, style=False)
            p_loss = loss_dict['content']
            rec_loss = rec_loss + p_loss
        else:
            # stay device-agnostic instead of hard-coding .cuda()
            p_loss = torch.zeros((), device=inputs.device)
        nll_loss = rec_loss

        # adversarial loss for both branches
        if optimizer_idx == 0:
            log = {}
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            # generator update
            if disc_factor > 0:
                logits_fake = self.discriminator(reconstructions.contiguous())
                g_loss = -torch.mean(logits_fake)

                try:
                    d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
                except RuntimeError:
                    # autograd.grad has no graph to differentiate when grads are
                    # disabled (e.g. during validation), so fall back to 0
                    d_weight = torch.tensor(0.0)
                loss = nll_loss + d_weight * disc_factor * g_loss
                log["d_weight"] = d_weight.detach()
                log["disc_factor"] = torch.tensor(disc_factor)
                log["g_loss"] = g_loss.detach().mean()
            else:
                loss = nll_loss

            log["total_loss"] = loss.clone().detach().mean()
            log["nll_loss"] = nll_loss.detach().mean()
            log["rec_loss"] = rec_loss.detach().mean()
            log["p_loss"] = p_loss.detach().mean()
            return loss, log

        if optimizer_idx == 1:
            # second pass for discriminator update
            logits_real = self.discriminator(inputs.contiguous().detach())
            logits_fake = self.discriminator(reconstructions.contiguous().detach())

            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)

            log = {"disc_loss": d_loss.clone().detach().mean(),
                   "logits_real": logits_real.detach().mean(),
                   "logits_fake": logits_fake.detach().mean()
                   }
            return d_loss, log
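
# A minimal sketch of how the criterion above is typically driven from a
# two-optimizer training loop; `autoencoder`, `opt_ae`, `opt_disc`,
# `get_last_layer()` and the checkpoint path are illustrative placeholders,
# not part of this module:
#
#   loss_fn = LPIPSWithDiscriminator(disc_start=10000, model_path="vgg.pth")
#   for step, x in enumerate(loader):
#       x_rec = autoencoder(x)
#       ae_loss, log = loss_fn(x, x_rec, optimizer_idx=0, global_step=step,
#                              last_layer=autoencoder.get_last_layer())
#       opt_ae.zero_grad(); ae_loss.backward(); opt_ae.step()
#       d_loss, log = loss_fn(x, x_rec, optimizer_idx=1, global_step=step)
#       opt_disc.zero_grad(); d_loss.backward(); opt_disc.step()
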

class LPIPSWithDiscriminatorMask(nn.Module):
    """Variant of LPIPSWithDiscriminator whose perceptual criterion also
    applies a masked style loss (LossCriterionMask)."""

    def __init__(self, disc_start, model_path, pixelloss_weight=1.0,
                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=0.8,
                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
                 disc_ndf=64, disc_loss="hinge", rec_loss="FFT",
                 style_layers=[], content_layers=['r41']):
        super().__init__()
        assert disc_loss in ["hinge", "vanilla"]
        self.pixel_weight = pixelloss_weight
        self.perceptual_loss = LossCriterionMask(style_layers, content_layers,
                                                 0.2, perceptual_weight,
                                                 model_path=model_path)

        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
                                                 n_layers=disc_num_layers,
                                                 use_actnorm=use_actnorm,
                                                 ndf=disc_ndf
                                                 ).apply(weights_init)
        self.discriminator_iter_start = disc_start
        if disc_loss == "hinge":
            self.disc_loss = hinge_d_loss
        elif disc_loss == "vanilla":
            self.disc_loss = vanilla_d_loss
        else:
            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
        print(f"VQLPIPSWithDiscriminator running with {disc_loss} and {rec_loss} loss.")
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional

        self.rec_loss = rec_loss
        self.perceptual_weight = perceptual_weight

    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        if last_layer is not None:
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]

        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
        d_weight = d_weight * self.discriminator_weight
        return d_weight

    def forward(self, inputs, reconstructions, optimizer_idx,
                global_step, mask, last_layer=None, cond=None, split="train"):
        """Return (loss, log) as in LPIPSWithDiscriminator; `mask` is passed
        through to the masked perceptual/style criterion."""
        if self.rec_loss == "L1":
            rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()).mean()
        elif self.rec_loss == "MSE":
            rec_loss = F.mse_loss(reconstructions, inputs)
        elif self.rec_loss == "FFT":
            rec_loss = fft_loss(inputs, reconstructions)
        elif self.rec_loss is None:
            rec_loss = 0
        else:
            raise ValueError("Unkown reconstruction loss, choices are [FFT, L1]")

        if self.perceptual_weight > 0:
            loss_dict = self.perceptual_loss(reconstructions, inputs, mask, style=True)
            p_loss = loss_dict['content']
            s_loss = loss_dict['style']
            rec_loss = rec_loss + p_loss + s_loss
        else:
            # stay device-agnostic, and define s_loss as well so the log below
            # never hits an unbound name when the perceptual term is disabled
            p_loss = torch.zeros((), device=inputs.device)
            s_loss = torch.zeros((), device=inputs.device)
        nll_loss = rec_loss

        # adversarial loss for both branches
        if optimizer_idx == 0:
            # generator update
            logits_fake = self.discriminator(reconstructions.contiguous())
            g_loss = -torch.mean(logits_fake)

            try:
                d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
            except RuntimeError:
                # autograd.grad has no graph to differentiate when grads are
                # disabled (e.g. during validation), so fall back to 0
                d_weight = torch.tensor(0.0)

            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            loss = nll_loss + d_weight * disc_factor * g_loss

            log = {"total_loss": loss.clone().detach().mean(),
                   "nll_loss": nll_loss.detach().mean(),
                   "rec_loss": rec_loss.detach().mean(),
                   "p_loss": p_loss.detach().mean(),
                   "s_loss": s_loss,
                   "d_weight": d_weight.detach(),
                   "disc_factor": torch.tensor(disc_factor),
                   "g_loss": g_loss.detach().mean(),
                   }
            return loss, log

        if optimizer_idx == 1:
            # second pass for discriminator update
            logits_real = self.discriminator(inputs.contiguous().detach())
            logits_fake = self.discriminator(reconstructions.contiguous().detach())

            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)

            log = {"disc_loss": d_loss.clone().detach().mean(),
                   "logits_real": logits_real.detach().mean(),
                   "logits_fake": logits_fake.detach().mean()
                   }
            return d_loss, log
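
# The mask variant is driven the same way as LPIPSWithDiscriminator, except
# that `forward` takes an extra `mask` tensor forwarded to LossCriterionMask;
# e.g. (placeholders as in the sketch above):
#
#   loss_fn = LPIPSWithDiscriminatorMask(disc_start=10000, model_path="vgg.pth")
#   ae_loss, log = loss_fn(x, x_rec, optimizer_idx=0, global_step=step,
#                          mask=mask, last_layer=autoencoder.get_last_layer())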