diff --git a/deps/build_ci.jl b/deps/build_ci.jl
index 831872c8..68ed8c9c 100644
--- a/deps/build_ci.jl
+++ b/deps/build_ci.jl
@@ -16,6 +16,7 @@ if isdir(joinpath(@__DIR__), "..", ".git")
     @info "Latest change to the wrappers: $(unix2datetime(deps_timestamp))"
 
     # find out which version of oneAPI_Support_jll we are using
+    Pkg.add(; url="https://github.com/leios/GPUArrays.jl/", rev="yoyoyo_rebase_time")
     Pkg.activate(joinpath(@__DIR__, ".."))
     Pkg.instantiate()
     deps = collect(values(Pkg.dependencies()))
diff --git a/src/gpuarrays.jl b/src/gpuarrays.jl
index 614e655d..1cc39492 100644
--- a/src/gpuarrays.jl
+++ b/src/gpuarrays.jl
@@ -1,5 +1,7 @@
 # GPUArrays.jl interface
 
+import KernelAbstractions
+import KernelAbstractions: Backend
 
 #
 # Device functionality
@@ -8,13 +10,16 @@
 
 ## execution
 
-struct oneArrayBackend <: AbstractGPUBackend end
+@inline function GPUArrays.launch_heuristic(::oneAPIBackend, obj::O, args::Vararg{Any,N};
+                                            elements::Int, elements_per_thread::Int) where {O,N}
+    ndrange = ceil(Int, elements / elements_per_thread)
+    ndrange, workgroupsize, iterspace, dynamic = KA.launch_config(obj, ndrange,
+                                                                  nothing)
 
-struct oneKernelContext <: AbstractKernelContext end
+    # this might not be the final context, since we may tune the workgroupsize
+    ctx = KA.mkcontext(obj, ndrange, iterspace)
 
-@inline function GPUArrays.launch_heuristic(::oneArrayBackend, f::F, args::Vararg{Any,N};
-                                            elements::Int, elements_per_thread::Int) where {F,N}
-    kernel = @oneapi launch=false f(oneKernelContext(), args...)
+    kernel = @oneapi launch=false obj.f(ctx, args...)
 
     items = launch_configuration(kernel)
     # XXX: how many groups is a good number? the API doesn't tell us.
@@ -23,48 +28,6 @@ struct oneKernelContext <: AbstractKernelContext end
     return (threads=items, blocks=32)
 end
 
-function GPUArrays.gpu_call(::oneArrayBackend, f, args, threads::Int, blocks::Int;
-                            name::Union{String,Nothing})
-    @oneapi items=threads groups=blocks name=name f(oneKernelContext(), args...)
-end
-
-
-## on-device
-
-# indexing
-
-GPUArrays.blockidx(ctx::oneKernelContext) = oneAPI.get_group_id(0)
-GPUArrays.blockdim(ctx::oneKernelContext) = oneAPI.get_local_size(0)
-GPUArrays.threadidx(ctx::oneKernelContext) = oneAPI.get_local_id(0)
-GPUArrays.griddim(ctx::oneKernelContext) = oneAPI.get_num_groups(0)
-
-# math
-
-@inline GPUArrays.cos(ctx::oneKernelContext, x) = oneAPI.cos(x)
-@inline GPUArrays.sin(ctx::oneKernelContext, x) = oneAPI.sin(x)
-@inline GPUArrays.sqrt(ctx::oneKernelContext, x) = oneAPI.sqrt(x)
-@inline GPUArrays.log(ctx::oneKernelContext, x) = oneAPI.log(x)
-
-# memory
-
-@inline function GPUArrays.LocalMemory(::oneKernelContext, ::Type{T}, ::Val{dims}, ::Val{id}
-                                      ) where {T, dims, id}
-    ptr = oneAPI.emit_localmemory(Val(id), T, Val(prod(dims)))
-    oneDeviceArray(dims, LLVMPtr{T, onePI.AS.Local}(ptr))
-end
-
-# synchronization
-
-@inline GPUArrays.synchronize_threads(::oneKernelContext) = oneAPI.barrier()
-
-
-
-#
-# Host abstractions
-#
-
-GPUArrays.backend(::Type{<:oneArray}) = oneArrayBackend()
-
 const GLOBAL_RNGs = Dict{ZeDevice,GPUArrays.RNG}()
 function GPUArrays.default_rng(::Type{<:oneArray})
     dev = device()
diff --git a/src/oneAPI.jl b/src/oneAPI.jl
index 259fea77..00d83593 100644
--- a/src/oneAPI.jl
+++ b/src/oneAPI.jl
@@ -17,6 +17,8 @@ using Core: LLVMPtr
 
 using SPIRV_LLVM_Translator_unified_jll, SPIRV_Tools_jll
 
+import KernelAbstractions as KA
+
 export oneL0
 
 # core library
@@ -67,14 +69,14 @@ end
 # integrations and specialized functionality
 include("broadcast.jl")
 include("mapreduce.jl")
+include("oneAPIKernels.jl")
+import .oneAPIKernels: oneAPIBackend, KA.launch_config
+export oneAPIBackend
+
 include("gpuarrays.jl")
 include("random.jl")
 include("utils.jl")
 
-include("oneAPIKernels.jl")
-import .oneAPIKernels: oneAPIBackend
-export oneAPIBackend
-
 function __init__()
     precompiling = ccall(:jl_generating_output, Cint, ()) != 0
     precompiling && return