Clarification on dataset_benchmark and generic scenario #1162
Unanswered
MagicHealer
asked this question in
Q&A
Replies: 1 comment 1 reply
-
The script looks OK. Are you sure that you are using the GPU?
Beta Was this translation helpful? Give feedback.
1 reply
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Uh oh!
There was an error while loading. Please reload this page.
Uh oh!
There was an error while loading. Please reload this page.
-
Hi, can you please verify that I used dataset_benchmark in the right way? If so, why does it take so long to train, even for just 1 epoch? I am using a GPU; 1 epoch finishes after about 2 hrs. Thank you.
CODE:
from torch.optim import SGD
from torch.nn import CrossEntropyLoss

from torchvision.transforms import (
    CenterCrop,
    Compose,
    Normalize,
    RandomCrop,
    Resize,
    ToTensor,
)

from avalanche.benchmarks.datasets import MNIST, CIFAR10
from avalanche.benchmarks.generators import nc_benchmark, ni_benchmark
# FIX: this import was originally split over two lines without enclosing
# parentheses, which is a SyntaxError in Python.
from avalanche.evaluation.metrics import (
    accuracy_metrics,
    confusion_matrix_metrics,
    cpu_usage_metrics,
    disk_usage_metrics,
    forgetting_metrics,
    loss_metrics,
    timing_metrics,
)
from avalanche.logging import (
    InteractiveLogger,
    TensorboardLogger,
    TextLogger,
    WandBLogger,
)
from avalanche.models import SimpleMLP, SimpleCNN
from avalanche.training.plugins import EvaluationPlugin
from avalanche.training.supervised import Naive
# Training pipeline for MNIST: convert the single-channel images to RGB
# (the VGG16 model used below expects 3 input channels), random-crop for
# augmentation, then normalize with the standard MNIST statistics (the
# 1-element mean/std broadcasts over the 3 channels).
train_transform = Compose([
    #Resize(224),
    lambda x: x.convert("RGB"),
    RandomCrop(28, padding=4),
    ToTensor(),
    Normalize((0.1307,), (0.3081,))
])
# FIX: the evaluation pipeline must be deterministic. The original used
# RandomCrop here too, so every eval pass saw different inputs, adding
# noise to the reported accuracy. MNIST images are already 28x28, so no
# crop is needed at all at test time.
test_transform = Compose([
    #Resize(224),
    lambda x: x.convert("RGB"),
    ToTensor(),
    Normalize((0.1307,), (0.3081,))
])
# MNIST train/test splits, downloaded to ./data/mnist on first run and
# wired to the corresponding preprocessing pipelines defined above.
mnist_train = MNIST('./data/mnist', train=True, download=True,
                    transform=train_transform)
mnist_test = MNIST('./data/mnist', train=False, download=True,
                   transform=test_transform)
# Training pipeline for CIFAR-10: random 28x28 crop so that MNIST and
# CIFAR-10 tensors share the same spatial size.
# NOTE(review): the MNIST normalization statistics are reused here;
# CIFAR-10 has its own per-channel mean/std — confirm this is intended.
train_transform1 = Compose([
    #Resize(224),
    RandomCrop(28, padding=4),
    ToTensor(),
    Normalize((0.1307,), (0.3081,))
])
# FIX: evaluation must be deterministic — use a fixed CenterCrop instead
# of RandomCrop so every eval pass sees identical 28x28 inputs.
test_transform1 = Compose([
    #Resize(224),
    CenterCrop(28),
    ToTensor(),
    Normalize((0.1307,), (0.3081,))
])
# CIFAR-10 train/test splits with their own pipelines (no RGB conversion
# needed here — CIFAR-10 images are already 3-channel).
train_cifar10 = CIFAR10('./data/cifar10', train=True, download=True,
                        transform=train_transform1)
test_cifar10 = CIFAR10('./data/cifar10', train=False, download=True,
                       transform=test_transform1)
from avalanche.benchmarks.generators import filelist_benchmark, dataset_benchmark, tensors_benchmark, paths_benchmark

# Generic two-experience benchmark: experience 0 is MNIST, experience 1
# is CIFAR-10. The i-th train dataset is paired with the i-th test
# dataset, so the two lists must line up positionally.
train_sets = [mnist_train, train_cifar10]
test_sets = [mnist_test, test_cifar10]
generic_scenario = dataset_benchmark(train_sets, test_sets)
import torch

# FIX: the original hard-coded "cuda:2" (in a pointless f-string), which
# raises at the first .to(device) on machines with fewer than three GPUs
# and only falls back to CPU when CUDA is entirely absent. Keep the
# preference for GPU #2 when it exists, otherwise use the default CUDA
# device, otherwise the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:2" if torch.cuda.device_count() > 2 else "cuda")
else:
    device = torch.device("cpu")
print(device)

import torchvision

# VGG16 trained from scratch with a 10-way head. torchvision's VGG ends
# in an AdaptiveAvgPool2d, so the 28x28 inputs produced above are
# accepted, but VGG16 is very heavy for such small images.
# NOTE(review): a smaller model (e.g. SimpleCNN) would train far faster.
model = torchvision.models.vgg16(num_classes=10)
print(model)
# Loggers: TensorBoard events, an append-mode text log, and the
# interactive progress bar.
tb_logger = TensorboardLogger()
text_logger = TextLogger(open('log.txt', 'a'))
interactive_logger = InteractiveLogger()
# FIX: WandBLogger was instantiated but commented out of `loggers` below;
# constructing it still initializes a wandb run (and can block waiting
# for authentication), so do not create it until it is actually used.
# wandb_logger = WandBLogger()

# Evaluation plugin: which metrics to track and where to emit them.
# NOTE(review): disk_usage_metrics with minibatch=True polls disk usage
# on every minibatch — measurable per-step overhead; consider epoch-level
# granularity if training speed matters.
eval_plugin = EvaluationPlugin(
    accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    timing_metrics(epoch=True, epoch_running=True),
    forgetting_metrics(experience=True, stream=True),
    cpu_usage_metrics(experience=True),
    disk_usage_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loggers=[interactive_logger, text_logger, tb_logger],
)
# Plain cross-entropy + SGD driving the Naive strategy (sequential
# fine-tuning over the experiences, no forgetting mitigation).
criterion = CrossEntropyLoss()
optimizer = SGD(model.parameters(), lr=0.01)

cl_strategy = Naive(
    model, optimizer, criterion,
    device=device,
    train_mb_size=350,
    train_epochs=10,
    eval_mb_size=128,
    evaluator=eval_plugin,
)
print("Starting experiment...")
results = []
# Train on each experience in order, evaluating on the full test stream
# after each one.
for experience in generic_scenario.train_stream:
    print("Start of Training for Task ", experience.current_experience)
    # PERF: the likely cause of the "~2h per epoch" symptom — without
    # DataLoader workers, all PIL decoding and augmentation runs in the
    # main process and the GPU sits idle waiting for batches. Avalanche
    # strategies forward `num_workers` to the underlying DataLoader.
    cl_strategy.train(experience, num_workers=4)
    print("Training completed")
    print("Computing accuracy on the whole test set")
    results.append(cl_strategy.eval(generic_scenario.test_stream, num_workers=4))
Beta Was this translation helpful? Give feedback.
All reactions