From 9e9700c8a29dc69b0b4c87efd5b243f5ff92969f Mon Sep 17 00:00:00 2001
From: Michel Schanen
Date: Mon, 10 Jul 2023 15:38:52 -0500
Subject: [PATCH] Remove AMDGPU dependency

---
 Project.toml                         |  7 +++----
 src/ExaAdmmBackend/ExaAdmmBackend.jl |  8 ++++----
 test/blockmodel.jl                   | 12 ------------
 test/convergence.jl                  | 12 ------------
 4 files changed, 7 insertions(+), 32 deletions(-)

diff --git a/Project.toml b/Project.toml
index 95ff99b..a3c6971 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,10 +1,10 @@
 name = "ProxAL"
 uuid = "12c3852d-bf95-4e7b-be60-68937c3c927b"
 authors = ["Anirudh Subramanyam ", "Youngdae Kim ", "Francois Pacaud ", "Michel Schanen "]
-version = "0.10.0"
+version = "0.11.0"
 
 [deps]
-AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
+Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
 CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 CatViews = "81a5f4ea-a946-549a-aa7e-2a7f63a27d31"
 DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
@@ -23,10 +23,9 @@ TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
 
 [compat]
 Adapt = "3"
-AMDGPU = "0.4"
 CUDA = "4.1"
 CatViews = "1"
-ExaAdmm = "0.5"
+ExaAdmm = "0.6"
 ExaPF = "0.9"
 ExaTron = "3"
 HDF5 = "0.16"
diff --git a/src/ExaAdmmBackend/ExaAdmmBackend.jl b/src/ExaAdmmBackend/ExaAdmmBackend.jl
index 9f05101..7a624c9 100644
--- a/src/ExaAdmmBackend/ExaAdmmBackend.jl
+++ b/src/ExaAdmmBackend/ExaAdmmBackend.jl
@@ -1,7 +1,7 @@
 module ExaAdmmBackend
 
+using Adapt
 using CUDA
-using AMDGPU
 import MPI
 using KernelAbstractions
 using ExaAdmm
@@ -129,9 +129,9 @@ function ExaAdmm.AdmmEnv(opfdata, rho_va::Float64, rho_pq::Float64; use_gpu=fals
     T = Float64
     if use_gpu
         if !isa(ka_device, Nothing)
-            VT = typeof(ExaAdmm.KAArray{Float64}(0, ka_device))
-            VI = typeof(ExaAdmm.KAArray{Int}(0, ka_device))
-            MT = typeof(ExaAdmm.KAArray{Float64}(0, 0, ka_device))
+            VT = typeof(adapt(ka_device, Vector{Float64}(undef, 0)))
+            VI = typeof(adapt(ka_device, Vector{Int}(undef, 0)))
+            MT = typeof(adapt(ka_device, Matrix{Float64}(undef, 0, 0)))
         else
             VT = CuVector{Float64}
             VI = CuVector{Int}
diff --git a/test/blockmodel.jl b/test/blockmodel.jl
index 098bb7c..b8b536b 100644
--- a/test/blockmodel.jl
+++ b/test/blockmodel.jl
@@ -24,21 +24,9 @@ load_file = joinpath(DATA_DIR, "mp_demand", "$(case)_oneweek_168")
 
 solver_list = ["ExaAdmmCPU"]
 if CUDA.has_cuda_gpu()
-    function ProxAL.ExaAdmm.KAArray{T}(n::Int, device::CUDABackend) where {T}
-        return CuArray{T}(undef, n)
-    end
-    function ProxAL.ExaAdmm.KAArray{T}(n1::Int, n2::Int, device::CUDABackend) where {T}
-        return CuArray{T}(undef, n1, n2)
-    end
     gpu_device = CUDABackend()
     push!(solver_list, "ExaAdmmGPUKA")
 elseif AMDGPU.has_rocm_gpu()
-    function ProxAL.ExaAdmm.KAArray{T}(n::Int, device::ROCBackend) where {T}
-        return ROCArray{T}(undef, n)
-    end
-    function ProxAL.ExaAdmm.KAArray{T}(n1::Int, n2::Int, device::ROCBackend) where {T}
-        return ROCArray{T}(undef, n1, n2)
-    end
     gpu_device = ROCBackend()
     push!(solver_list, "ExaAdmmGPUKA")
 end
diff --git a/test/convergence.jl b/test/convergence.jl
index 331c050..efdd86d 100644
--- a/test/convergence.jl
+++ b/test/convergence.jl
@@ -39,21 +39,9 @@ algparams.verbose = 0
 solver_list = ["Ipopt", "ExaAdmmCPU"]
 if CUDA.has_cuda_gpu()
     push!(solver_list, "ExaAdmmGPU")
-    function ProxAL.ExaAdmm.KAArray{T}(n::Int, device::CUDABackend) where {T}
-        return CuArray{T}(undef, n)
-    end
-    function ProxAL.ExaAdmm.KAArray{T}(n1::Int, n2::Int, device::CUDABackend) where {T}
-        return CuArray{T}(undef, n1, n2)
-    end
     gpu_device = CUDABackend()
     push!(solver_list, "ExaAdmmGPUKA")
 elseif AMDGPU.has_rocm_gpu()
-    function ProxAL.ExaAdmm.KAArray{T}(n::Int, device::ROCBackend) where {T}
-        return ROCArray{T}(undef, n)
-    end
-    function ProxAL.ExaAdmm.KAArray{T}(n1::Int, n2::Int, device::ROCBackend) where {T}
-        return ROCArray{T}(undef, n1, n2)
-    end
     gpu_device = ROCBackend()
     push!(solver_list, "ExaAdmmGPUKA")
 end
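Note on the change above: the hunks replace the hand-written ProxAL.ExaAdmm.KAArray constructors with Adapt.adapt calls on empty host arrays, so the KernelAbstractions backend object determines the device array types. A minimal sketch of that pattern, assuming CUDA.jl's KernelAbstractions integration (which supplies CUDABackend and its adapt rules) is loaded; the variable names are illustrative only, not part of the patch:

    # Sketch only: probe the device array types via Adapt, as AdmmEnv now does.
    using Adapt, CUDA, KernelAbstractions

    ka_device = CUDABackend()                                 # KA backend, as constructed in the tests
    dev_vec   = adapt(ka_device, Vector{Float64}(undef, 0))   # host Array -> CuArray on a CUDA backend
    VT        = typeof(dev_vec)                                # concrete device vector type, e.g. CuVector{Float64}
    MT        = typeof(adapt(ka_device, Matrix{Float64}(undef, 0, 0)))  # corresponding device matrix type

With an AMD backend the same adapt call is expected to yield ROCArray types, which is why the per-backend KAArray constructor definitions in test/blockmodel.jl and test/convergence.jl are no longer needed.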