From bb861ff18c445be84f9b0e5c4f425e4a18abb199 Mon Sep 17 00:00:00 2001
From: Benson Muite
Date: Sat, 19 Jun 2021 08:39:51 +0300
Subject: [PATCH 1/3] remove check_random_state, MXNet -> PyTorch

Related pull request at https://github.com/JeanKossaifi/caltech-tutorial-19/pull/1
---
 tensor_regression_layer.ipynb | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tensor_regression_layer.ipynb b/tensor_regression_layer.ipynb
index b5ba132..5d5c950 100644
--- a/tensor_regression_layer.ipynb
+++ b/tensor_regression_layer.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "# Tensor Regression Networks with ``TensorLy`` and ``PyTorch`` as a backend\n",
     "\n",
-    "In this notebook, we will show how to combine TensorLy and MXNet in to implement the tensor regression layer, as defined in **Tensor Contraction & Regression Networks**, _Jean Kossaifi, Zachary C. Lipton, Aran Khanna, Tommaso Furlanello and Anima Anandkumar_, [ArXiV pre-publication](https://arxiv.org/abs/1707.08308).\n",
+    "In this notebook, we will show how to combine TensorLy and PyTorch to implement the tensor regression layer, as defined in **Tensor Contraction & Regression Networks**, _Jean Kossaifi, Zachary C. Lipton, Aran Khanna, Tommaso Furlanello and Anima Anandkumar_, [arXiv preprint](https://arxiv.org/abs/1707.08308).\n",
     "\n",
     "\n",
     "Specifically, we use [TensorLy](http://tensorly.org/dev/index.html) for the tensor operations, with the [PyTorch](http://pytorch.org/) backend.\n",
@@ -124,7 +124,7 @@
     "        return F.log_softmax(x)\n",
     "```\n",
     "\n",
-    "In this notebook, we will demonstrate how to implement easily the TRL using TensorLy and MXNet."
+    "In this notebook, we will demonstrate how to easily implement the TRL using TensorLy and PyTorch."
    ]
   },
   {
@@ -150,8 +150,7 @@
     "import numpy as np\n",
     "\n",
     "import tensorly as tl\n",
-    "from tensorly.tenalg import inner\n",
-    "from tensorly.random import check_random_state"
+    "from tensorly.tenalg import inner"
    ]
   },
   {
@@ -187,7 +186,7 @@
     "batch_size = 16\n",
     "device = 'cuda:0'\n",
     "# to run on CPU, uncomment the following line:\n",
-    "device = 'cpu'"
+    "# device = 'cpu'"
    ]
   },
   {

From d9e9ba30d53aef8c4cb3d4e8685462edf1f9f6fa Mon Sep 17 00:00:00 2001
From: Benson Muite
Date: Sat, 19 Jun 2021 08:47:35 +0300
Subject: [PATCH 2/3] remove unused check_random_state

---
 tt-compression.ipynb | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tt-compression.ipynb b/tt-compression.ipynb
index 1893ad6..24f7ca9 100644
--- a/tt-compression.ipynb
+++ b/tt-compression.ipynb
@@ -32,8 +32,7 @@
     "\n",
     "import numpy as np\n",
     "\n",
-    "import tensorly as tl\n",
-    "from tensorly.random import check_random_state"
+    "import tensorly as tl"
    ]
   },
   {

From 7ba8d43e0f14981c0cf216d82c1a02638e62dafb Mon Sep 17 00:00:00 2001
From: Benson Muite
Date: Sat, 19 Jun 2021 19:28:52 +0300
Subject: [PATCH 3/3] fix typo

reduce regularization constant to ensure convergence
remove unused import
---
 tensor_regression_layer.ipynb | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tensor_regression_layer.ipynb b/tensor_regression_layer.ipynb
index 5d5c950..52725bd 100644
--- a/tensor_regression_layer.ipynb
+++ b/tensor_regression_layer.ipynb
@@ -142,14 +142,13 @@
    "source": [
     "import torch\n",
     "import torch.nn as nn\n",
-    "from torch.autograd import Variable\n",
     "import torch.optim as optim\n",
     "from torchvision import datasets, transforms\n",
     "import torch.nn.functional as F\n",
     "\n",
     "import numpy as np\n",
     "\n",
     "import tensorly as tl\n",
     "from tensorly.tenalg import inner"
    ]
   },
@@ -273,7 +272,7 @@
     "    def penalty(self, order=2):\n",
     "        penalty = tl.norm(self.core, order)\n",
     "        for f in self.factors:\n",
-    "            penatly = penalty + tl.norm(f, order)\n",
+    "            penalty = penalty + tl.norm(f, order)\n",
     "        return penalty\n"
    ]
   },
@@ -394,7 +393,7 @@
    ],
    "source": [
     "n_epoch = 5 # Number of epochs\n",
-    "regularizer = 0.001\n",
+    "regularizer = 0.0001\n",
     "\n",
     "model = model.to(device)\n",
     "\n",
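
Note on PATCH 3/3: the typo fix and the regularizer change work together.
With the old "penatly" binding, penalty() only ever returned the norm of the
core, so the factor norms were silently dropped from the regularization term.
Once the typo is fixed the full penalty is counted, and the constant is
reduced from 0.001 to 0.0001 to keep training convergent. Below is a minimal
standalone sketch of the corrected computation, assuming the PyTorch backend;
core and factors are toy stand-ins for the notebook's TRL parameters, and the
loss is assumed to take the form cross_entropy + regularizer * penalty.

    import torch
    import tensorly as tl

    tl.set_backend('pytorch')

    # Toy stand-ins for the TRL core tensor and factor matrices;
    # the shapes are arbitrary and only chosen for illustration.
    core = torch.randn(4, 4, 4)
    factors = [torch.randn(16, 4) for _ in range(3)]

    # Corrected penalty(): order-2 norm of the core plus each factor.
    # Under the old typo, only tl.norm(core, 2) was ever returned.
    penalty = tl.norm(core, 2)
    for f in factors:
        penalty = penalty + tl.norm(f, 2)

    # The smaller constant keeps regularizer * penalty small relative
    # to a cross-entropy term of order ~1.
    regularizer = 0.0001
    print(float(regularizer * penalty))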