############################################################################################
# Device-side random number generation tests.
# NOTE(review): reconstructed from a garbled diff rendering (post-change state);
# verify against the upstream CUDA.jl test suite.

using Random

@testset "random numbers" begin

# number of threads per launch; one generated value per thread
n = 256

# Exercise rand(T) on-device for all supported element types, both unseeded
# (kernel launches should produce independent streams) and with an explicit
# seed (launches should reproduce the same stream).
@testset "basic rand($T), seed $seed" for T in (Int32, UInt32, Int64, UInt64, Int128, UInt128,
                                                Float32, Float64),
                                          seed in (nothing, #=missing,=# 1234)

    # Apply the requested seeding mode inside the kernel before drawing numbers.
    function apply_seed(seed)
        if seed === missing
            # should result in different numbers across launches
            Random.seed!()
            # XXX: this currently doesn't work, because of the definition in Base,
            # `seed!(r::MersenneTwister=default_rng())`, which breaks overriding
            # `default_rng` with a non-MersenneTwister RNG.
        elseif seed !== nothing
            # should result in the same numbers
            Random.seed!(seed)
        elseif seed === nothing
            # should result in different numbers across launches,
            # as determined by the seed set during module loading.
        end
    end

    # different kernel invocations should get different numbers
    @testset "across launches" begin
        function kernel(A::AbstractArray{T}, seed) where {T}
            apply_seed(seed)
            tid = threadIdx().x
            A[tid] = rand(T)
            return nothing
        end

        a = CUDA.zeros(T, n)
        b = CUDA.zeros(T, n)

        @cuda threads=n kernel(a, seed)
        @cuda threads=n kernel(b, seed)

        if seed === nothing || seed === missing
            # independent launches: every element should differ
            @test all(Array(a) .!= Array(b))
        else
            # identical explicit seed: launches must reproduce the same stream
            @test Array(a) == Array(b)
        end
    end

    # multiple calls to rand should get different numbers
    @testset "across calls" begin
        function kernel(A::AbstractArray{T}, B::AbstractArray{T}, seed) where {T}
            apply_seed(seed)
            tid = threadIdx().x
            A[tid] = rand(T)
            B[tid] = rand(T)
            return nothing
        end

        a = CUDA.zeros(T, n)
        b = CUDA.zeros(T, n)

        @cuda threads=n kernel(a, b, seed)

        # consecutive draws within one thread must differ
        @test all(Array(a) .!= Array(b))
    end

    # different threads should get different numbers
    @testset "across threads" for active_dim in 1:6
        function kernel(A::AbstractArray{T}, seed) where {T}
            apply_seed(seed)
            # exactly one thread/block dimension is 2, the rest are 1,
            # so this product yields a unique index in 1:2
            id = threadIdx().x * threadIdx().y * threadIdx().z *
                 blockIdx().x * blockIdx().y * blockIdx().z
            A[id] = rand(T)
            return nothing
        end

        # size 2 along the single active dimension, 1 elsewhere
        tx, ty, tz, bx, by, bz = [dim == active_dim ? 2 : 1 for dim in 1:6]
        a = CUDA.zeros(T, 2)

        @cuda threads=(tx, ty, tz) blocks=(bx, by, bz) kernel(a, seed)

        # the two threads must draw distinct values
        @test Array(a)[1] != Array(a)[2]
    end
end

end