
Commit 4530009

Change names to image, target and output

1 parent 501f4df

File tree: 4 files changed, +24 −24 lines changed

demo.py

Lines changed: 10 additions & 10 deletions
@@ -37,21 +37,21 @@
 groundtruth_dir = os.path.join('demo', 'groundtruth')
 os.makedirs(result_dir, exist_ok=True)
 os.makedirs(groundtruth_dir, exist_ok=True)
-for images, masks in tqdm.tqdm(testloader, desc='Demo'):
+for image, target in tqdm.tqdm(testloader, desc='Demo'):
     # Multiply the mask by 255 to change its 0~1 values to 0~255 + remove the channel dimension
-    masks.mul_(255).squeeze_(dim=1)
+    target.mul_(255).squeeze_(dim=1)

-    images, masks = images.to(device), masks.type(torch.LongTensor)
+    image, target = image.to(device), target.type(torch.LongTensor)

     # Prediction
     with torch.no_grad():
-        masks_pred = model(images)
-        masks_pred = F.log_softmax(masks_pred, dim=1)
-        masks_pred = torch.argmax(masks_pred, dim=1)
+        output = model(image)
+        output = F.log_softmax(output, dim=1)
+        output = torch.argmax(output, dim=1)

     # Process one batch at a time
-    assert masks.shape[0] == masks_pred.shape[0]
-    for i in range(masks.shape[0]):
-        plt.imsave(os.path.join(result_dir, image_names[step]), masks_pred[i].cpu(), cmap=cmap)
-        plt.imsave(os.path.join(groundtruth_dir, image_names[step]), masks[i], cmap=cmap)
+    assert target.shape[0] == output.shape[0]
+    for i in range(target.shape[0]):
+        plt.imsave(os.path.join(result_dir, image_names[step]), output[i].cpu(), cmap=cmap)
+        plt.imsave(os.path.join(groundtruth_dir, image_names[step]), target[i], cmap=cmap)
         step += 1
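For reference: the `target.mul_(255).squeeze_(dim=1)` line that opens this loop (and the matching loops in eval.py and train.py) rescales a float mask and drops its channel axis in place. A minimal standalone sketch, assuming the loader yields masks as floats in 0~1 with shape (N, 1, H, W) — these shapes are illustrative, not taken from the repo's dataset:

```python
import torch

# Dummy batch of 4 masks: float values in [0, 1], explicit channel dim.
target = torch.rand(4, 1, 8, 8)
print(target.shape)                              # torch.Size([4, 1, 8, 8])

# In-place: scale 0~1 values up to 0~255, then drop the channel dim.
target.mul_(255).squeeze_(dim=1)
print(target.shape)                              # torch.Size([4, 8, 8])
print(target.min().item(), target.max().item())  # roughly 0.0 and 255.0

# The loops then cast to int64 so the values can act as class indices.
target = target.to(dtype=torch.int64)
```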

eval.py

Lines changed: 8 additions & 8 deletions
@@ -48,27 +48,27 @@ def evaluate(model, testloader, criterion, num_classes: int, device):
     metrics = EvaluationMetrics(num_classes)
     val_loss = 0
     inference_time = 0
-    for images, masks in tqdm.tqdm(testloader, desc='Eval', leave=False):
+    for image, target in tqdm.tqdm(testloader, desc='Eval', leave=False):
         # Multiply the mask by 255 to change its 0~1 values to 0~255 + remove the channel dimension
-        masks.mul_(255).squeeze_(dim=1)
+        target.mul_(255).squeeze_(dim=1)

-        images, masks = images.to(device), masks.to(device, dtype=torch.int64)
+        image, target = image.to(device), target.to(device, dtype=torch.int64)

         # Prediction
         with torch.no_grad():
             start_time = time.time()
-            masks_pred = model(images)
+            output = model(image)
             inference_time += time.time() - start_time

         # Accumulate the validation loss
-        val_loss += criterion(masks_pred, masks).item()
+        val_loss += criterion(output, target).item()

         # Build the segmentation map
-        masks_pred = F.log_softmax(masks_pred, dim=1)
-        masks_pred = torch.argmax(masks_pred, dim=1)
+        output = F.log_softmax(output, dim=1)
+        output = torch.argmax(output, dim=1)

         # Update the confusion matrix
-        metrics.update_matrix(masks, masks_pred)
+        metrics.update_matrix(target, output)

     # Get the evaluation scores
     iou, miou = metrics.get_scores(ignore_first_label=True)
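Both demo.py and eval.py collapse the raw logits into a label map with `log_softmax` followed by `argmax`. A self-contained sketch of that step; the class count and spatial size are made-up values for illustration. Note that `log_softmax` is monotonic per pixel, so the resulting labels are identical to taking `argmax` over the raw logits:

```python
import torch
import torch.nn.functional as F

# Dummy logits in the layout the loops assume: (N, C, H, W).
output = torch.randn(2, 21, 8, 8)   # batch of 2, 21 hypothetical classes

# Per-pixel argmax over the class dimension -> (N, H, W) label map.
seg_map = torch.argmax(F.log_softmax(output, dim=1), dim=1)
print(seg_map.shape)                # torch.Size([2, 8, 8])

# log_softmax does not change the per-pixel ordering, so this holds:
assert torch.equal(seg_map, output.argmax(dim=1))
```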

feature_visualizer.py

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ def hook(model, input, output):

     # Prediction
     with torch.no_grad():
-        mask_pred = model(image)
+        output = model(image)

     # Save the feature maps of each layer
     for layer in tqdm.tqdm(feature_maps.keys(), desc='Saving'):
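The hunk header shows that feature_visualizer.py collects activations with a forward hook named `hook(model, input, output)`. A minimal sketch of that pattern with a stand-in model; apart from the hook signature, everything below is an assumption for illustration, not the repo's actual setup:

```python
import torch
import torch.nn as nn

feature_maps = {}

def hook(model, input, output):
    # Store this layer's activation under a readable key.
    feature_maps[model.__class__.__name__] = output.detach()

# Stand-in two-layer model, purely for illustration.
model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
model[0].register_forward_hook(hook)

with torch.no_grad():
    output = model(torch.randn(1, 3, 32, 32))

for layer, fmap in feature_maps.items():
    print(layer, fmap.shape)        # Conv2d torch.Size([1, 8, 32, 32])
```

One side effect of the rename worth noting: `output` now appears both as the hook's third parameter and as the prediction variable. The two live in different scopes, so they do not clash, but the shadowing is easy to misread.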

train.py

Lines changed: 5 additions & 5 deletions
@@ -37,16 +37,16 @@
 for epoch in tqdm.tqdm(range(config[config['model']]['epoch']), desc='Epoch'):
     model.train()

-    for batch_idx, (images, masks) in enumerate(tqdm.tqdm(trainloader, desc='Train', leave=False)):
+    for batch_idx, (image, target) in enumerate(tqdm.tqdm(trainloader, desc='Train', leave=False)):
         # Multiply the mask by 255 to change its 0~1 values to 0~255 + remove the channel dimension
-        masks.mul_(255).squeeze_(dim=1)
+        target.mul_(255).squeeze_(dim=1)

-        images, masks = images.to(device), masks.to(device, dtype=torch.int64)
+        image, target = image.to(device), target.to(device, dtype=torch.int64)

         # Forward + backward + optimize
         optimizer.zero_grad()
-        masks_pred = model(images)
-        loss = criterion(masks_pred, masks)
+        output = model(image)
+        loss = criterion(output, target)
         loss.backward()
         optimizer.step()
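The renamed loop is the standard forward + backward + optimize step. A compact runnable sketch with a stand-in model and dummy data; the layer, loss, optimizer, and all shapes are assumptions for illustration, not the repo's configuration:

```python
import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Stand-in "segmentation model": 3-channel input, 5-class per-pixel logits.
model = nn.Conv2d(3, 5, kernel_size=1).to(device)
criterion = nn.CrossEntropyLoss()    # expects int64 class-index targets
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

image = torch.randn(2, 3, 16, 16, device=device)          # (N, C, H, W)
target = torch.randint(0, 5, (2, 16, 16), device=device)  # (N, H, W), int64

# Forward + backward + optimize, mirroring the loop in train.py.
optimizer.zero_grad()
output = model(image)                # (N, C, H, W) logits
loss = criterion(output, target)
loss.backward()
optimizer.step()
print(loss.item())
```

This also shows why the loops cast `target` to int64: `nn.CrossEntropyLoss` requires integer class indices rather than the float mask the loader yields.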
