Skip to content

Commit 23d55a4

Browse files
Merge pull request #23 from SciML/core
Change to ArrayInterfaceCore
2 parents 8ea1bb5 + 41fc27b commit 23d55a4

File tree

3 files changed

+9
-9
lines changed

3 files changed

+9
-9
lines changed

Project.toml

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -5,13 +5,13 @@ version = "0.3.0"
55

66
[deps]
77
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
8-
ArrayInterface = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
8+
ArrayInterfaceCore = "30b0a656-2188-435a-8636-2ec0e6a096e2"
99
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
1010
LabelledArrays = "2ee39098-c373-598a-b85f-a56591580800"
1111

1212
[compat]
1313
Adapt = "3"
14-
ArrayInterface = "3, 5"
14+
ArrayInterfaceCore = "0.1.1"
1515
ForwardDiff = "0.10.3"
1616
LabelledArrays = "1"
1717
julia = "1.6"

src/PreallocationTools.jl

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1,14 +1,14 @@
11
module PreallocationTools
22

3-
using ForwardDiff, ArrayInterface, LabelledArrays, Adapt
3+
using ForwardDiff, ArrayInterfaceCore, LabelledArrays, Adapt
44

55
"""
    DiffCache{T<:AbstractArray, S<:AbstractArray}

Preallocated cache pairing a primal array `du` with a flat buffer `dual_du`
that `get_tmp` reinterprets as `ForwardDiff.Dual` storage, so repeated
differentiation calls reuse memory instead of allocating.
"""
struct DiffCache{T<:AbstractArray, S<:AbstractArray}
    du::T        # primal-valued array; also the shape template for restructure
    dual_du::S   # flat scratch buffer; reinterpreted/viewed by get_tmp
end
99

1010
"""
    DiffCache(u::AbstractArray{T}, siz, chunk_sizes) where {T}

Construct a `DiffCache` for `u`, allocating a flat dual buffer of length
`prod(chunk_sizes .+ 1) * prod(siz)` with element type `T`.

The buffer is `adapt`-ed to the parameterless array type of `u`, so e.g. a
GPU input array gets a GPU-resident buffer (see the gpu test checking
`parameterless_type` equality).
"""
function DiffCache(u::AbstractArray{T}, siz, chunk_sizes) where {T}
    # zeros(...) allocates on the CPU; adapt converts to u's array family.
    x = adapt(ArrayInterfaceCore.parameterless_type(u),
              zeros(T, prod(chunk_sizes .+ 1) * prod(siz)))
    DiffCache(u, x)
end
1414

@@ -40,23 +40,23 @@ function get_tmp(dc::DiffCache, u::T) where T<:ForwardDiff.Dual
4040
if nelem > length(dc.dual_du)
4141
enlargedualcache!(dc, nelem)
4242
end
43-
ArrayInterface.restructure(dc.du, reinterpret(T, view(dc.dual_du, 1:nelem)))
43+
ArrayInterfaceCore.restructure(dc.du, reinterpret(T, view(dc.dual_du, 1:nelem)))
4444
end
4545

4646
"""
    get_tmp(dc::DiffCache, u::AbstractArray{T}) where T<:ForwardDiff.Dual

Return a dual-valued scratch array shaped like `dc.du`, backed by
`dc.dual_du` (no allocation when the buffer is already large enough).
Grows the buffer via `enlargedualcache!` when needed.
"""
function get_tmp(dc::DiffCache, u::AbstractArray{T}) where T<:ForwardDiff.Dual
    # How many eltype(dc.dual_du) slots the dual-valued view of du needs:
    # each Dual occupies sizeof(T)/sizeof(scalar) slots.
    nelem = div(sizeof(T), sizeof(eltype(dc.dual_du))) * length(dc.du)
    if nelem > length(dc.dual_du)
        enlargedualcache!(dc, nelem)  # sibling helper; grows dc.dual_du
    end
    # Reinterpret the flat buffer as Duals and give it du's shape/type.
    ArrayInterfaceCore.restructure(dc.du, reinterpret(T, view(dc.dual_du, 1:nelem)))
end
5353

5454
"""
    get_tmp(dc::DiffCache, u::LabelledArrays.LArray{T,N,D,Syms})

Like the `AbstractArray{<:Dual}` method, but re-wraps the restructured
buffer as an `LArray` with the same labels `Syms`, so named indexing on
the returned scratch array still works.
"""
function get_tmp(dc::DiffCache, u::LabelledArrays.LArray{T,N,D,Syms}) where {T,N,D,Syms}
    # Slots needed: duals-per-scalar ratio times the primal length.
    nelem = div(sizeof(T), sizeof(eltype(dc.dual_du))) * length(dc.du)
    if nelem > length(dc.dual_du)
        enlargedualcache!(dc, nelem)  # sibling helper; grows dc.dual_du
    end
    _x = ArrayInterfaceCore.restructure(dc.du, reinterpret(T, view(dc.dual_du, 1:nelem)))
    # Preserve the label metadata of u on the returned cache.
    LabelledArrays.LArray{T,N,D,Syms}(_x)
end
6262

test/gpu_all.jl

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,4 @@
1-
using LinearAlgebra, OrdinaryDiffEq, Test, PreallocationTools, CUDA, ForwardDiff, ArrayInterface
1+
using LinearAlgebra, OrdinaryDiffEq, Test, PreallocationTools, CUDA, ForwardDiff, ArrayInterfaceCore
22

33
#Dispatch tests
44
chunk_size = 5
@@ -10,7 +10,7 @@ tmp_du_CUA = get_tmp(cache_CU, u0_CU)
1010
tmp_dual_du_CUA = get_tmp(cache_CU, dual_CU)
1111
tmp_du_CUN = get_tmp(cache_CU, 0.0)
1212
tmp_dual_du_CUN = get_tmp(cache_CU, dual_N)
13-
@test ArrayInterface.parameterless_type(typeof(cache_CU.dual_du)) == ArrayInterface.parameterless_type(typeof(u0_CU)) #check that dual cache array is a GPU array for performance reasons.
13+
@test ArrayInterfaceCore.parameterless_type(typeof(cache_CU.dual_du)) == ArrayInterfaceCore.parameterless_type(typeof(u0_CU)) #check that dual cache array is a GPU array for performance reasons.
1414
@test size(tmp_du_CUA) == size(u0_CU)
1515
@test typeof(tmp_du_CUA) == typeof(u0_CU)
1616
@test eltype(tmp_du_CUA) == eltype(u0_CU)

0 commit comments

Comments
 (0)