Training using CUB Dataset and CIFAR10 not Learning #1183
MagicHealer asked this question in Q&A (unanswered, 0 replies)
Hi, I've been trying to train on two different datasets as two different tasks. In this experiment I used the CUB dataset as the initial task and CIFAR10 as the second task. However, during training the model is not learning. Can anyone point out what I have been doing wrong? Thank you.
Here is my source code:
!pip install avalanche-lib==0.2.1
from torch.optim import SGD
from torch.nn import CrossEntropyLoss
from avalanche.benchmarks.datasets import MNIST, CIFAR10, CUB200
from avalanche.evaluation.metrics import (
    forgetting_metrics, accuracy_metrics, loss_metrics, timing_metrics,
    cpu_usage_metrics, confusion_matrix_metrics, disk_usage_metrics
)
from avalanche.models import SimpleMLP, SimpleCNN
from avalanche.logging import InteractiveLogger, TextLogger, TensorboardLogger, WandBLogger
from avalanche.training.plugins import EvaluationPlugin
from avalanche.training.supervised import Naive, LwF
from avalanche.benchmarks.generators import nc_benchmark, ni_benchmark
from torchvision.transforms import Compose, ToTensor, Normalize, RandomCrop, Resize
train_transform = Compose([
    Resize((224, 224)),
    # lambda x: x.convert("RGB"),
    # RandomCrop(28, padding=4),
    ToTensor(),
    Normalize((0.1307,), (0.3081,))
])
test_transform = Compose([
    Resize((224, 224)),
    lambda x: x.convert("RGB"),
    # RandomCrop(28, padding=4),
    ToTensor(),
    Normalize((0.1307,), (0.3081,))
])
cub_train = CUB200(
    './data/mnist', train=True, download=True, transform=train_transform
)
cub_test = CUB200(
    './data/mnist', train=False, download=True, transform=test_transform
)
train_transform = Compose([
    Resize((224, 224)),
    # RandomCrop(28, padding=4),
    ToTensor(),
    Normalize((0.1307,), (0.3081,))
])
test_transform = Compose([
    Resize((224, 224)),
    # RandomCrop(28, padding=4),
    ToTensor(),
    Normalize((0.1307,), (0.3081,))
])
train_cifar10 = CIFAR10(
    './data/cifar10', train=True, download=True, transform=train_transform
)
test_cifar10 = CIFAR10(
    './data/cifar10', train=False, download=True, transform=test_transform
)
from avalanche.benchmarks.generators import (
    filelist_benchmark, dataset_benchmark, tensors_benchmark, paths_benchmark
)
generic_scenario = dataset_benchmark(
    [cub_train, train_cifar10],
    [cub_test, test_cifar10]
)
from avalanche.benchmarks.utils import AvalancheDataset
train_cub_task0 = AvalancheDataset(cub_train, task_labels=0)
test_cub_task0 = AvalancheDataset(cub_test, task_labels=0)
train_cifar10_task1 = AvalancheDataset(train_cifar10, task_labels=1)
test_cifar10_task1 = AvalancheDataset(test_cifar10, task_labels=1)
scenario_custom_task_labels = dataset_benchmark(
    [train_cub_task0, train_cifar10_task1],
    [test_cub_task0, test_cifar10_task1]
)
print('Without custom task labels:',
      generic_scenario.train_stream[1].task_label)
print('With custom task labels:',
      scenario_custom_task_labels.train_stream[1].task_label)
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"[INFO]: Computation device: {device}")
import torchvision
model = torchvision.models.alexnet(num_classes=200)
tb_logger = TensorboardLogger()
text_logger = TextLogger(open('log.txt', 'a'))
interactive_logger = InteractiveLogger()
eval_plugin = EvaluationPlugin(
    accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    timing_metrics(epoch=True, epoch_running=True),
    forgetting_metrics(experience=True, stream=True),
    # cpu_usage_metrics(experience=True),
    # disk_usage_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loggers=[interactive_logger, text_logger, tb_logger]  # , wandb_logger
)
cl_strategy = LwF(
    model, SGD(model.parameters(), lr=0.001, momentum=0.9),
    CrossEntropyLoss(), device=device, train_mb_size=128, train_epochs=20,
    eval_mb_size=128, evaluator=eval_plugin, alpha=0.1, temperature=2
)
print("Starting experiment...")
results = []
for experience in scenario_custom_task_labels.train_stream:
    print("Start of Training for Task ", experience.current_experience)
    cl_strategy.train(experience)
    print("Training completed")
    print("Computing accuracy on the whole test set")
    results.append(cl_strategy.eval(scenario_custom_task_labels.test_stream))
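For reference, before training I also sanity-check what each experience contains. This is only a minimal sketch; it assumes the standard Avalanche 0.2.x experience attributes (current_experience, task_label, dataset, classes_in_this_experience), so adjust the names if your version exposes them differently:
# Sketch: inspect each training experience before fitting the strategy.
# Assumes Avalanche 0.2.x experience attributes (task_label, dataset,
# classes_in_this_experience); prints task id, sample count and class count.
for experience in scenario_custom_task_labels.train_stream:
    print("Experience", experience.current_experience,
          "| task label:", experience.task_label,
          "| samples:", len(experience.dataset),
          "| classes:", len(experience.classes_in_this_experience))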
And here is a sample result of training:
