diff --git a/DCGAN_with_GPU_Google_Colab.ipynb b/DCGAN_with_GPU_Google_Colab.ipynb new file mode 100644 index 0000000..eb6f4f5 --- /dev/null +++ b/DCGAN_with_GPU_Google_Colab.ipynb @@ -0,0 +1,677 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "DCGAN with GPU - Google Colab.ipynb", + "version": "0.3.2", + "provenance": [], + "collapsed_sections": [], + "include_colab_link": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "[View in Colaboratory](https://colab.research.google.com/github/maxmatical/pytorch-projects/blob/generative-models/DCGAN_with_GPU_Google_Colab.ipynb)" + ] + }, + { + "metadata": { + "id": "jU1r3xFhO3r5", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 54 + }, + "outputId": "fa9282d5-f322-4e97-bbc5-2dddccdb111a" + }, + "cell_type": "code", + "source": [ + "# install pytorch 0.4.1 with gpu\n", + "\n", + "from os import path\n", + "from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag\n", + "platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())\n", + "\n", + "accelerator = 'cu80' if path.exists('/opt/bin/nvidia-smi') else 'cpu'\n", + "\n", + "!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "text": [ + "tcmalloc: large alloc 1073750016 bytes == 0x5635c000 @ 0x7f22ad7181c4 0x46d6a4 0x5fcbcc 0x4c494d 0x54f3c4 0x553aaf 0x54e4c8 0x54f4f6 0x553aaf 0x54efc1 0x54f24d 0x553aaf 0x54efc1 0x54f24d 0x553aaf 0x54efc1 0x54f24d 0x551ee0 0x54e4c8 0x54f4f6 0x553aaf 0x54efc1 0x54f24d 0x551ee0 0x54efc1 0x54f24d 0x551ee0 0x54e4c8 0x54f4f6 0x553aaf 0x54e4c8\r\n" + ], + "name": "stdout" + } + ] + }, + { + "metadata": { + "id": "x1ErFHmrMDkV", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 84 + }, + "outputId": "c09afed0-cc1e-4eff-ccb0-42c9493264b0" + }, + "cell_type": "code", + "source": [ + "import torch\n", + "print(torch.__version__)\n", + "print(torch.cuda.is_available())\n", + "print('Torch', torch.__version__, 'CUDA', torch.version.cuda)\n", + "print('Device:', torch.device('cuda:0'))\n" + ], + "execution_count": 2, + "outputs": [ + { + "output_type": "stream", + "text": [ + "0.4.1\n", + "True\n", + "Torch 0.4.1 CUDA 8.0.61\n", + "Device: cuda:0\n" + ], + "name": "stdout" + } + ] + }, + { + "metadata": { + "id": "FMk_Dd0SMgS1", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 84 + }, + "outputId": "493ade85-2b6f-49d2-9f0a-9491555624f7" + }, + "cell_type": "code", + "source": [ + "# install required packagles\n", + "!pip install torchsummary" + ], + "execution_count": 3, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Collecting torchsummary\n", + " Downloading https://files.pythonhosted.org/packages/2a/61/21b44bb29aedb820fec4716a102e802397f0c21512764a9d98206c17417d/torchsummary-1.4-py3-none-any.whl\n", + "Installing collected packages: torchsummary\n", + "Successfully installed torchsummary-1.4\n" + ], + "name": "stdout" + } + ] + }, + { + "metadata": { + "id": "c8qbn6taMF8O", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "import os\n", + "import torch\n", + "import torchvision\n", + "import torch.nn as nn\n", + 
"import torch.nn.functional as F\n", + "from torchvision import transforms\n", + "from torchvision.utils import save_image\n", + "\n", + "\n", + "from torchsummary import summary" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "OupEpCFeMfS_", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "# create directory\n", + "\n", + "sample_dir = 'samples'\n", + "if not os.path.exists(sample_dir):\n", + " os.makedirs(sample_dir)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "Qdoaeq-7MHf9", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "# loading and preprocessing MNIST\n", + "#batchsize is 128 from paper\n", + "bs = 128\n", + "img_size = 64 # scale up imgs\n", + "\n", + "transform = transforms.Compose([\n", + " transforms.Resize(img_size),\n", + " transforms.ToTensor(),\n", + " transforms.Normalize(mean=(0.5, 0.5, 0.5), # 3 for RGB channels\n", + " std=(0.5, 0.5, 0.5))])\n", + "\n", + "# MNIST dataset\n", + "mnist = torchvision.datasets.MNIST(root='../../data/',\n", + " train=True,\n", + " transform=transform,\n", + " download=True)\n", + "\n", + "# Data loader\n", + "data_loader = torch.utils.data.DataLoader(dataset=mnist,\n", + " batch_size=bs, \n", + " shuffle=True,\n", + " num_workers=2)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "KjgZ9bNpMqHv", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "\n", + "#Discriminator\n", + "class discriminator(nn.Module):\n", + " def __init__(self, bs):\n", + " super(discriminator, self).__init__()\n", + " self.bs = bs\n", + " self.conv1 = nn.Conv2d(1, bs, 4, stride=2, padding = 1) # no bn after conv1, input dim is 1\n", + " self.conv2 = nn.Conv2d(bs, bs*2, 4, stride = 2, padding = 1)\n", + " self.bn2 = nn.BatchNorm2d(bs*2) #256 channels\n", + " self.conv3 = nn.Conv2d(bs*2, bs*4, 4, stride = 2, padding = 1)\n", + " self.bn3 = nn.BatchNorm2d(bs*4) #512 channels\n", + " self.conv4 = nn.Conv2d(bs*4, bs*8, 4, stride = 2, padding = 1)\n", + " self.bn4 = nn.BatchNorm2d(bs*8) #1024 channels\n", + " self.conv5 = nn.Conv2d(bs*8, 1, 4, stride = 1, padding = 0)\n", + " self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n", + " #self.sigmoid = nn.Sigmoid()\n", + " \n", + " def forward(self, input):\n", + " out = self.leaky_relu(self.conv1(input))\n", + " out = self.leaky_relu(self.bn2(self.conv2(out)))\n", + " out = self.leaky_relu(self.bn3(self.conv3(out)))\n", + " out = self.leaky_relu(self.bn4(self.conv4(out)))\n", + " out = F.sigmoid((self.conv5(out)))\n", + " \n", + " return out\n", + " \n", + "D = discriminator(bs)\n", + " \n", + "# Generator\n", + "class generator(nn.Module):\n", + " def __init__(self, bs):\n", + " super(generator, self).__init__()\n", + " self.bs = bs\n", + " self.deconv1 = nn.ConvTranspose2d(100, bs*8, 4, stride = 1, padding = 0) # 100 in, 1024 outchannels\n", + " self.bn1 = nn.BatchNorm2d(bs*8) #1024 channels\n", + " self.deconv2 = nn.ConvTranspose2d(bs*8, bs*4, 4, stride = 2, padding = 1)\n", + " self.bn2 = nn.BatchNorm2d(bs*4) \n", + " self.deconv3 = nn.ConvTranspose2d(bs*4, bs*2, 4, stride = 2, padding = 1)\n", + " self.bn3 = nn.BatchNorm2d(bs*2) \n", + " self.deconv4 = nn.ConvTranspose2d(bs*2, bs, 4, stride = 2, padding = 1)\n", + " self.bn4 = nn.BatchNorm2d(bs) \n", + " self.deconv5 = nn.ConvTranspose2d(bs, 1, 4, stride=2, padding = 1)\n", + " self.relu = nn.ReLU(inplace=True)\n", + " #self.tanh = nn.Tanh()\n", + " def 
forward(self, input):\n", + " out = self.relu(self.bn1(self.deconv1(input)))\n", + " out = self.relu(self.bn2(self.deconv2(out)))\n", + " out = self.relu(self.bn3(self.deconv3(out)))\n", + " out = self.relu(self.bn4(self.deconv4(out)))\n", + " out = F.tanh((self.deconv5(out)))\n", + " return out\n", + "G = generator(bs)\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "qQvIR6BsNExy", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "# enable GPU\n", + "use_cuda = True" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "fFHPRUn3NQ0g", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "# initialization for the networks\n", + "def init_weight(m):\n", + " if isinstance(m, nn.Conv2d):\n", + " nn.init.normal_(m.weight, mean=0, std=0.02)\n", + " elif isinstance(m, nn.ConvTranspose2d):\n", + " nn.init.normal_(m.weight, mean=0, std=0.02)\n", + " elif type(m) == nn.BatchNorm2d:\n", + " torch.nn.init.constant_(m.weight, 1)\n", + " torch.nn.init.constant_(m.bias, 1)\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "A0Y6hRz6NNdc", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "if use_cuda and torch.cuda.is_available():\n", + " G.cuda()\n", + " D.cuda()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "O2dkp7DEfBu0", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 50 + }, + "outputId": "e9f56552-721b-49a3-8f72-008d27dc64cd" + }, + "cell_type": "code", + "source": [ + "# check if models are on cuda\n", + "print(next(G.parameters()).is_cuda)\n", + "print(next(D.parameters()).is_cuda)\n" + ], + "execution_count": 26, + "outputs": [ + { + "output_type": "stream", + "text": [ + "True\n", + "True\n" + ], + "name": "stdout" + } + ] + }, + { + "metadata": { + "id": "ilF7vZ63NT9H", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 202 + }, + "outputId": "63109b4d-3659-4d1f-b09b-e95490d0d49a" + }, + "cell_type": "code", + "source": [ + "# apply initializer\n", + "\n", + "G.apply(init_weight)\n", + "D.apply(init_weight)" + ], + "execution_count": 27, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "discriminator(\n", + " (conv1): Conv2d(1, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n", + " (conv2): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv3): Conv2d(256, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n", + " (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv4): Conv2d(512, 1024, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n", + " (bn4): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv5): Conv2d(1024, 1, kernel_size=(4, 4), stride=(1, 1))\n", + " (leaky_relu): LeakyReLU(negative_slope=0.2, inplace)\n", + ")" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 27 + } + ] + }, + { + "metadata": { + "id": "jeNFTU6xNVSp", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 470 + }, + "outputId": "118d5eaa-1a8e-4c0f-ab40-bfa4b5d6a125" + }, + "cell_type": "code", + "source": [ + "#summary of networks\n", + "#summary(G, (100))\n", + "summary(D, (1, 64, 64))" + ], + 
"execution_count": 28, + "outputs": [ + { + "output_type": "stream", + "text": [ + "----------------------------------------------------------------\n", + " Layer (type) Output Shape Param #\n", + "================================================================\n", + " Conv2d-1 [-1, 128, 32, 32] 2,176\n", + " LeakyReLU-2 [-1, 128, 32, 32] 0\n", + " Conv2d-3 [-1, 256, 16, 16] 524,544\n", + " BatchNorm2d-4 [-1, 256, 16, 16] 512\n", + " LeakyReLU-5 [-1, 256, 16, 16] 0\n", + " Conv2d-6 [-1, 512, 8, 8] 2,097,664\n", + " BatchNorm2d-7 [-1, 512, 8, 8] 1,024\n", + " LeakyReLU-8 [-1, 512, 8, 8] 0\n", + " Conv2d-9 [-1, 1024, 4, 4] 8,389,632\n", + " BatchNorm2d-10 [-1, 1024, 4, 4] 2,048\n", + " LeakyReLU-11 [-1, 1024, 4, 4] 0\n", + " Conv2d-12 [-1, 1, 1, 1] 16,385\n", + "================================================================\n", + "Total params: 11,033,985\n", + "Trainable params: 11,033,985\n", + "Non-trainable params: 0\n", + "----------------------------------------------------------------\n", + "Input size (MB): 0.02\n", + "Forward/backward pass size (MB): 4.63\n", + "Params size (MB): 42.09\n", + "Estimated Total Size (MB): 46.73\n", + "----------------------------------------------------------------\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:1006: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n", + " warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n" + ], + "name": "stderr" + } + ] + }, + { + "metadata": { + "id": "bgjuSe0mt7cO", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 487 + }, + "outputId": "37bcc7ce-0841-4550-8eb4-8d68dc028c24" + }, + "cell_type": "code", + "source": [ + "summary(G, (100,1,1))" + ], + "execution_count": 29, + "outputs": [ + { + "output_type": "stream", + "text": [ + "----------------------------------------------------------------\n", + " Layer (type) Output Shape Param #\n", + "================================================================\n", + " ConvTranspose2d-1 [-1, 1024, 4, 4] 1,639,424\n", + " BatchNorm2d-2 [-1, 1024, 4, 4] 2,048\n", + " ReLU-3 [-1, 1024, 4, 4] 0\n", + " ConvTranspose2d-4 [-1, 512, 8, 8] 8,389,120\n", + " BatchNorm2d-5 [-1, 512, 8, 8] 1,024\n", + " ReLU-6 [-1, 512, 8, 8] 0\n", + " ConvTranspose2d-7 [-1, 256, 16, 16] 2,097,408\n", + " BatchNorm2d-8 [-1, 256, 16, 16] 512\n", + " ReLU-9 [-1, 256, 16, 16] 0\n", + " ConvTranspose2d-10 [-1, 128, 32, 32] 524,416\n", + " BatchNorm2d-11 [-1, 128, 32, 32] 256\n", + " ReLU-12 [-1, 128, 32, 32] 0\n", + " ConvTranspose2d-13 [-1, 1, 64, 64] 2,049\n", + "================================================================\n", + "Total params: 12,656,257\n", + "Trainable params: 12,656,257\n", + "Non-trainable params: 0\n", + "----------------------------------------------------------------\n", + "Input size (MB): 0.00\n", + "Forward/backward pass size (MB): 5.66\n", + "Params size (MB): 48.28\n", + "Estimated Total Size (MB): 53.94\n", + "----------------------------------------------------------------\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:995: UserWarning: nn.functional.tanh is deprecated. Use torch.tanh instead.\n", + " warnings.warn(\"nn.functional.tanh is deprecated. 
Use torch.tanh instead.\")\n" + ], + "name": "stderr" + } + ] + }, + { + "metadata": { + "id": "bLhYpLNINW5m", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "# define loss and optimizers\n", + "criterion = nn.BCELoss()\n", + "d_opt = torch.optim.Adam(D.parameters(), lr = 0.0002, betas=(0.5, 0.999))\n", + "g_opt = torch.optim.Adam(G.parameters(), lr = 0.0002, betas=(0.5, 0.999))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "w0EDBZyhNZWX", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "# denorm for plotting\n", + "def denorm(x):\n", + " out = (x + 1) / 2\n", + " return out.clamp(0, 1)\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "cuGP2bxkNaiU", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 770 + }, + "outputId": "a9cace4b-6295-4674-9e67-21efc1455b9a" + }, + "cell_type": "code", + "source": [ + "# training\n", + "n_epochs = 20\n", + "total_step = len(data_loader)\n", + "for epoch in range(n_epochs):\n", + " for i, (images, _) in enumerate(data_loader):\n", + " \n", + " #create labels (1 for real img, 0 for fake img)\n", + " \n", + " real_labels = torch.ones(bs).view(-1,1,1,1) \n", + " fake_labels = torch.zeros(bs).view(-1,1,1,1)\n", + " \n", + " #enabling cuda\n", + " if use_cuda and torch.cuda.is_available():\n", + " images = images.cuda()\n", + " real_labels = real_labels.cuda()\n", + " fake_labels = fake_labels.cuda()\n", + " \n", + " \n", + " \n", + " \n", + " ########################\n", + " # training discrimiator\n", + " ########################\n", + " \n", + " # zeroing gradientsy\n", + " d_opt.zero_grad()\n", + " \n", + " # loss for real images\n", + " outputs = D(images).view(-1,1,1,1)\n", + " d_loss_real = criterion(outputs, real_labels)\n", + " real_score = outputs\n", + " \n", + " # loss for fake images\n", + " z = torch.randn(bs, 100).view(-1, 100, 1,1) # 100 is input channels for G\n", + " if use_cuda and torch.cuda.is_available():\n", + " z = z.cuda()\n", + " fake_img = G(z)\n", + " outputs = D(fake_img).view(-1,1,1,1)\n", + " d_loss_fake = criterion(outputs, fake_labels)\n", + " fake_score = outputs\n", + " \n", + " # total loss\n", + " total_loss = d_loss_real + d_loss_fake\n", + "\n", + " total_loss.backward()\n", + " d_opt.step()\n", + " \n", + " ########################\n", + " # training generator\n", + " ########################\n", + " # zeroing gradients\n", + " g_opt.zero_grad()\n", + " \n", + " # generating fake imgs \n", + " z = torch.randn(bs, 100).view(-1, 100, 1, 1)\n", + " if use_cuda and torch.cuda.is_available():\n", + " z = z.cuda()\n", + " fake_img = G(z)\n", + " outputs = D(fake_img).view(-1,1,1,1)\n", + " \n", + " # loss for generator\n", + " g_loss = criterion(outputs, real_labels)\n", + " g_loss.backward()\n", + " g_opt.step()\n", + " \n", + " if (i+5) % 100 == 0:\n", + " print('Epoch [{}/{}], Step [{}/{}], d_loss: {:.4f}, g_loss: {:.4f}, D(x): {:.2f}, D(G(z)): {:.2f}' \n", + " .format(epoch+1, n_epochs, i+1, total_step, total_loss.item(), g_loss.item(), \n", + " real_score.mean().item(), fake_score.mean().item()))\n", + "\n", + "\n", + "# # Save the model checkpoints \n", + "# #torch.save(G.state_dict(), 'G.ckpt')\n", + "# #torch.save(D.state_dict(), 'D.ckpt')\n" + ], + "execution_count": 32, + "outputs": [ + { + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:1006: UserWarning: nn.functional.sigmoid is deprecated. 
Use torch.sigmoid instead.\n", + " warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n", + "/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:995: UserWarning: nn.functional.tanh is deprecated. Use torch.tanh instead.\n", + " warnings.warn(\"nn.functional.tanh is deprecated. Use torch.tanh instead.\")\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "Epoch [1/20], Step [96/469], d_loss: 0.5494, g_loss: 5.2465, D(x): 0.95, D(G(z)): 0.23\n", + "Epoch [1/20], Step [196/469], d_loss: 1.2054, g_loss: 5.1388, D(x): 0.86, D(G(z)): 0.54\n", + "Epoch [1/20], Step [296/469], d_loss: 1.2517, g_loss: 4.7196, D(x): 0.91, D(G(z)): 0.61\n", + "Epoch [1/20], Step [396/469], d_loss: 1.4117, g_loss: 0.7615, D(x): 0.39, D(G(z)): 0.06\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:1594: UserWarning: Using a target size (torch.Size([128, 1, 1, 1])) that is different to the input size (torch.Size([96, 1, 1, 1])) is deprecated. Please ensure they have the same size.\n", + " \"Please ensure they have the same size.\".format(target.size(), input.size()))\n" + ], + "name": "stderr" + }, + { + "output_type": "error", + "ename": "ValueError", + "evalue": "ignored", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0;31m# loss for real images\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mD\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimages\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mview\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 29\u001b[0;31m \u001b[0md_loss_real\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreal_labels\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 30\u001b[0m \u001b[0mreal_score\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 475\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 476\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 477\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 478\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 479\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/torch/nn/modules/loss.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, input, target)\u001b[0m\n\u001b[1;32m 484\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 485\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 486\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbinary_cross_entropy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweight\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreduction\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreduction\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 487\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 488\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py\u001b[0m in \u001b[0;36mbinary_cross_entropy\u001b[0;34m(input, target, weight, size_average, reduce, reduction)\u001b[0m\n\u001b[1;32m 1595\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnelement\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnelement\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1596\u001b[0m raise ValueError(\"Target and input must have the same number of elements. target nelement ({}) \"\n\u001b[0;32m-> 1597\u001b[0;31m \"!= input nelement ({})\".format(target.nelement(), input.nelement()))\n\u001b[0m\u001b[1;32m 1598\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1599\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mweight\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mValueError\u001b[0m: Target and input must have the same number of elements. target nelement (128) != input nelement (96)" + ] + } + ] + }, + { + "metadata": { + "id": "G1IRJYYMNcj-", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "" + ], + "execution_count": 0, + "outputs": [] + } + ] +} \ No newline at end of file
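
A note on the new notebook's final training cell: it fails partway through the first epoch with `ValueError: Target and input must have the same number of elements. target nelement (128) != input nelement (96)`. The cause is visible in the cell itself: the real/fake label tensors (and the noise `z`) are sized with the fixed batch size `bs = 128`, while the last batch from the MNIST loader contains only 96 images (60000 % 128 = 96, matching the 469 steps per epoch). A minimal sketch of a corrected inner training step, assuming the `D`, `G`, `criterion`, `d_opt`, `g_opt`, `data_loader`, and `use_cuda` objects defined in the notebook, sizes everything from the actual batch:

```python
import torch

# Minimal sketch of a corrected training step (assumes D, G, criterion,
# d_opt, g_opt, data_loader, and use_cuda from the notebook above).
# The substantive change: labels and noise are sized from the actual batch,
# so the final 96-image batch no longer breaks nn.BCELoss.
for images, _ in data_loader:
    n = images.size(0)                      # actual batch size (may be < bs)
    real_labels = torch.ones(n, 1, 1, 1)
    fake_labels = torch.zeros(n, 1, 1, 1)
    if use_cuda and torch.cuda.is_available():
        images = images.cuda()
        real_labels = real_labels.cuda()
        fake_labels = fake_labels.cuda()

    # discriminator step
    d_opt.zero_grad()
    real_out = D(images).view(n, 1, 1, 1)
    d_loss_real = criterion(real_out, real_labels)

    z = torch.randn(n, 100, 1, 1)           # noise sized to the batch as well
    if use_cuda and torch.cuda.is_available():
        z = z.cuda()
    fake_img = G(z)
    fake_out = D(fake_img.detach()).view(n, 1, 1, 1)  # detach: no G gradients needed here
    d_loss_fake = criterion(fake_out, fake_labels)

    d_loss = d_loss_real + d_loss_fake
    d_loss.backward()
    d_opt.step()

    # generator step (reuses fake_img; its graph through G is still intact)
    g_opt.zero_grad()
    g_out = D(fake_img).view(n, 1, 1, 1)
    g_loss = criterion(g_out, real_labels)
    g_loss.backward()
    g_opt.step()
```

Passing `drop_last=True` to the `DataLoader` would avoid the mismatch as well. The `UserWarning`s in the same cell outputs can be silenced by calling `torch.sigmoid` and `torch.tanh` in the two `forward` methods, as the warnings themselves suggest, in place of the deprecated `F.sigmoid`/`F.tanh`.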
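
Separately, a hedged note on `init_weight`: the notebook initializes BatchNorm weights and biases both to the constant 1, whereas the DCGAN reference initialization typically draws BatchNorm weights from N(1.0, 0.02) and zeroes the biases. A sketch of that variant:

```python
import torch.nn as nn

# DCGAN-style initialization (a sketch): conv/deconv weights ~ N(0, 0.02),
# BatchNorm weights ~ N(1.0, 0.02), BatchNorm biases set to 0.
def init_weight(m):
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        nn.init.normal_(m.weight, mean=0.0, std=0.02)
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.normal_(m.weight, mean=1.0, std=0.02)
        nn.init.constant_(m.bias, 0)
```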