
Merge pull request #22 from EricHallahan/master
Update DaggerGPU to be compatible with Dagger 0.16
jpsamaroo authored Jan 19, 2023
2 parents 8f11922 + f34b00e · commit 08fd338
Showing 3 changed files with 32 additions and 21 deletions.
16 changes: 3 additions & 13 deletions Project.toml
@@ -15,17 +15,7 @@ UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
 [compat]
 Adapt = "1, 2, 3"
 Dagger = "0.13.3, 0.14, 0.15, 0.16"
-KernelAbstractions = "0.5, 0.6, 0.7"
-MemPool = "0.3"
+KernelAbstractions = "0.5, 0.6, 0.7, 0.8"
+MemPool = "0.3, 0.4"
 Requires = "1"
-julia = "1"
-
-[extras]
-AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
-CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
-Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
-Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
-
-[targets]
-test = ["AMDGPU", "CUDA", "Distributed", "Test"]
+julia = "1.6"
9 changes: 9 additions & 0 deletions test/Project.toml
@@ -0,0 +1,9 @@
+[deps]
+AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
+CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
+Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
+Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
+TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
+Dagger = "d58978e5-989f-55fb-8d15-ea34adc7bf54"
28 changes: 20 additions & 8 deletions test/runtests.jl
@@ -1,10 +1,19 @@
-using Distributed
 using Test
+using Distributed
 addprocs(2, exeflags="--project")
 
 @everywhere begin
-using CUDA, AMDGPU, Metal, KernelAbstractions
+try using CUDA
+catch end
+
+try using AMDGPU
+catch end
+
+try using Metal
+catch end
+
 using Distributed, Dagger, DaggerGPU
+using KernelAbstractions
 end
 @everywhere begin
 function myfunc(X)
@@ -52,21 +61,24 @@ end
 else
 cuproc = DaggerGPU.processor(:CUDA)
 b = generate_thunks()
-opts = Dagger.Sch.ThunkOptions(;proctypes=[cuproc])
+opts = Dagger.Sch.ThunkOptions(;proclist=[cuproc])
 c_pre = delayed(myfunc; options=opts)(b)
 c = delayed(sum; options=opts)(b)
 
-opts = Dagger.Sch.ThunkOptions(;proctypes=[Dagger.ThreadProc])
+opts = Dagger.Sch.ThunkOptions(;proclist=[Dagger.ThreadProc])
 d = delayed(identity; options=opts)(c)
 @test collect(d) == 20
 
+@test_skip "KernelAbstractions"
+#= FIXME
 @testset "KernelAbstractions" begin
 cuproc = DaggerGPU.processor(:CUDA)
-opts = Dagger.Sch.ThunkOptions(;proctypes=[cuproc])
+opts = Dagger.Sch.ThunkOptions(;proclist=[cuproc])
 A = rand(Float32, 8)
 _A = collect(delayed(fill_thunk)(A, 2.3); options=opts)
 @test all(_A .== 2.3)
 end
+=#
 end
 end

@@ -76,19 +88,19 @@ end
 else
 rocproc = DaggerGPU.processor(:ROC)
 b = generate_thunks()
-opts = Dagger.Sch.ThunkOptions(;proctypes=[rocproc])
+opts = Dagger.Sch.ThunkOptions(;proclist=[rocproc])
 c_pre = delayed(myfunc; options=opts)(b)
 c = delayed(sum; options=opts)(b)
 
-opts = Dagger.Sch.ThunkOptions(;proctypes=[Dagger.ThreadProc])
+opts = Dagger.Sch.ThunkOptions(;proclist=[Dagger.ThreadProc])
 d = delayed(identity; options=opts)(c)
 @test collect(d) == 20
 
+@test_skip "KernelAbstractions"
+#= FIXME
 @testset "KernelAbstractions" begin
 rocproc = DaggerGPU.processor(:ROC)
-opts = Dagger.Sch.ThunkOptions(;proctypes=[rocproc])
+opts = Dagger.Sch.ThunkOptions(;proclist=[rocproc])
 A = rand(Float32, 8)
 _A = collect(delayed(fill_thunk)(A, 2.3); options=opts)
 @test all(_A .== 2.3)
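The substantive API change these test updates track is that the scheduling constraint keyword of Dagger.Sch.ThunkOptions moves from proctypes to proclist. A minimal, hedged sketch of the pattern the updated tests use, assuming Dagger 0.16, DaggerGPU, and a worker with a functional CUDA device (variable names mirror the tests above):

```julia
# Hedged sketch: pin work to a GPU processor via `proclist`, then force the
# follow-up thunk back onto CPU threads, as the updated tests do.
using Dagger, DaggerGPU

cuproc = DaggerGPU.processor(:CUDA)
gpu_opts = Dagger.Sch.ThunkOptions(; proclist=[cuproc])
cpu_opts = Dagger.Sch.ThunkOptions(; proclist=[Dagger.ThreadProc])

a = delayed(sum; options=gpu_opts)(ones(Float32, 4))   # scheduled on a CUDA processor
b = delayed(identity; options=cpu_opts)(a)             # scheduled on a CPU thread
@assert collect(b) == 4.0f0
```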
