
Commit 015fbe4

Merge branch 'MengqingCao-npu_support' into device_amp_cleanup

2 parents: 1766a01 + 81b59fa

File tree

4 files changed (+22, -5 lines)


timm/data/loader.py

Lines changed: 9 additions & 2 deletions

@@ -113,13 +113,17 @@ def __init__(
             )
         else:
             self.random_erasing = None
-        self.is_cuda = torch.cuda.is_available() and device.type == 'cuda'
+        self.is_cuda = device.type == 'cuda' and torch.cuda.is_available()
+        self.is_npu = device.type == 'npu' and torch.npu.is_available()

     def __iter__(self):
         first = True
         if self.is_cuda:
             stream = torch.cuda.Stream()
             stream_context = partial(torch.cuda.stream, stream=stream)
+        elif self.is_npu:
+            stream = torch.npu.Stream()
+            stream_context = partial(torch.npu.stream, stream=stream)
         else:
             stream = None
             stream_context = suppress
@@ -139,7 +143,10 @@ def __iter__(self):
                 first = False

             if stream is not None:
-                torch.cuda.current_stream().wait_stream(stream)
+                if self.is_cuda:
+                    torch.cuda.current_stream().wait_stream(stream)
+                elif self.is_npu:
+                    torch.npu.current_stream().wait_stream(stream)

             input = next_input
             target = next_target
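The prefetcher now picks its side stream through a small per-device dispatch, so host-to-device copies can overlap with compute on both CUDA and Ascend NPU. A minimal sketch of that pattern, assuming the Ascend plugin is installed (importing torch_npu is what attaches the torch.npu namespace):

    # Sketch only: per-device stream selection as used by the prefetcher above.
    # Assumes `import torch_npu` has been run on Ascend systems, which is what
    # exposes the `torch.npu` namespace.
    from contextlib import suppress
    from functools import partial

    import torch


    def make_stream_context(device: torch.device):
        """Return (stream, zero-arg context factory) for async H2D copies."""
        if device.type == 'cuda' and torch.cuda.is_available():
            stream = torch.cuda.Stream()
            return stream, partial(torch.cuda.stream, stream=stream)
        if device.type == 'npu' and hasattr(torch, 'npu') and torch.npu.is_available():
            stream = torch.npu.Stream()
            return stream, partial(torch.npu.stream, stream=stream)
        return None, suppress  # CPU or unknown device: no side stream

The partial(...) wrapping matters: stream_context must be a zero-argument callable yielding a context manager, which is why contextlib.suppress works as the no-op fallback.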

timm/utils/distributed.py

Lines changed: 3 additions & 0 deletions

@@ -116,6 +116,7 @@ def init_distributed_device_so(
         "xpu": "ccl",
         "hpu": "hccl",
         "cuda": "nccl",
+        "npu": "hccl",
     }
     dist_backend = dist_backends.get(device_type, 'gloo')
     dist_url = dist_url or 'env://'
@@ -159,6 +160,8 @@ def init_distributed_device_so(

     if device_type == 'cuda':
         assert torch.cuda.is_available(), f'CUDA is not available but {device} was specified.'
+    if device_type == 'npu':
+        assert torch.npu.is_available(), f'Ascend NPU is not available but {device} was specified.'

     if distributed and device != 'cpu':
         # Ignore manually specified device index in distributed mode and
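The backend table is the whole dispatch: each accelerator type maps to its collective-communication backend, with 'gloo' as the CPU/unknown fallback. Note that both Habana ('hpu') and Ascend ('npu') register their backend under the name 'hccl'. A small sketch of the lookup as shown in the hunk above:

    # Sketch of the backend selection added in this commit.
    dist_backends = {
        "xpu": "ccl",    # Intel oneCCL
        "hpu": "hccl",   # Habana
        "cuda": "nccl",
        "npu": "hccl",   # Ascend HCCL, registered under the same backend name
    }


    def pick_backend(device_type: str) -> str:
        return dist_backends.get(device_type, 'gloo')


    assert pick_backend('npu') == 'hccl'
    assert pick_backend('cpu') == 'gloo'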

train.py

Lines changed: 7 additions & 2 deletions

@@ -1042,8 +1042,11 @@ def _backward(_loss):
         if model_ema is not None:
             model_ema.update(model, step=num_updates)

-        if args.synchronize_step and device.type == 'cuda':
-            torch.cuda.synchronize()
+        if args.synchronize_step:
+            if device.type == 'cuda':
+                torch.cuda.synchronize()
+            elif device.type == 'npu':
+                torch.npu.synchronize()
         time_now = time.time()
         update_time_m.update(time.time() - update_start_time)
         update_start_time = time_now
@@ -1143,6 +1146,8 @@ def validate(

             if device.type == 'cuda':
                 torch.cuda.synchronize()
+            elif device.type == "npu":
+                torch.npu.synchronize()

             losses_m.update(reduced_loss.item(), input.size(0))
             top1_m.update(acc1.item(), output.size(0))
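The same two-way synchronize dispatch now appears in both the training step and the validate loop. A hypothetical helper, not part of this commit, that would centralize it:

    # Hypothetical helper (not in this commit): one place for the per-device
    # synchronize that train.py now repeats in two spots.
    import torch


    def device_synchronize(device: torch.device) -> None:
        if device.type == 'cuda':
            torch.cuda.synchronize()
        elif device.type == 'npu' and hasattr(torch, 'npu'):
            torch.npu.synchronize()
        # other device types: nothing queued asynchronously, host timing is fine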

validate.py

Lines changed: 3 additions & 1 deletion

@@ -387,8 +387,10 @@ def _try_run(args, initial_batch_size):
     while batch_size:
         args.batch_size = batch_size * args.num_gpu  # multiply by num-gpu for DataParallel case
         try:
-            if torch.cuda.is_available() and 'cuda' in args.device:
+            if 'cuda' in args.device and torch.cuda.is_available():
                 torch.cuda.empty_cache()
+            elif "npu" in args.device and torch.npu.is_available():
+                torch.npu.empty_cache()
             results = validate(args)
             return results
         except RuntimeError as e:
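This retry loop reduces the batch size after an out-of-memory failure, so the allocator's cached blocks from the failed attempt are released before each retry. A sketch of the cache-clearing dispatch, assuming args.device is a string such as 'cuda', 'cuda:0', or 'npu:0':

    # Sketch of the pre-retry cache flush, matching the substring checks above.
    import torch


    def empty_device_cache(device_str: str) -> None:
        if 'cuda' in device_str and torch.cuda.is_available():
            torch.cuda.empty_cache()
        elif 'npu' in device_str and hasattr(torch, 'npu') and torch.npu.is_available():
            torch.npu.empty_cache()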
