From c73797592ef9558f89b6de9ed0b23066767fbcee Mon Sep 17 00:00:00 2001 From: Andrew Player Date: Mon, 29 Apr 2024 13:57:46 -0500 Subject: [PATCH 01/23] GPU Support --- Dockerfile | 16 +++++-- README.md | 5 ++ scripts/build_proc_cpu.sh | 12 ----- scripts/build_proc_gpu.sh | 98 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 116 insertions(+), 15 deletions(-) create mode 100644 scripts/build_proc_gpu.sh diff --git a/Dockerfile b/Dockerfile index 69ec3e1..a5de4d2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,9 +22,18 @@ ENV PYTHONDONTWRITEBYTECODE=true ENV PROC_HOME=/home/conda/back-projection ENV MYHOME=/home/conda -RUN apt-get update && apt-get install -y --no-install-recommends unzip vim curl build-essential gfortran libfftw3-dev && \ +RUN apt-get update && apt-get install -y --no-install-recommends unzip vim curl build-essential gfortran libfftw3-dev nvidia-driver-535 software-properties-common && \ apt-get clean && rm -rf /var/lib/apt/lists/* +RUN wget https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda-repo-debian12-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ + dpkg -i cuda-repo-debian12-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ + cp /var/cuda-repo-debian12-12-4-local/cuda-*-keyring.gpg /usr/share/keyrings/ && \ + add-apt-repository contrib && \ + apt-get update && \ + apt-get -y install cuda-toolkit-12-4 && \ + nvcc --version + +ARG USE_GPU=yes ARG CONDA_UID=1000 ARG CONDA_GID=1000 ARG BACK_PROJECTION_TAG=0.2.0 @@ -36,7 +45,6 @@ RUN groupadd -g "${CONDA_GID}" --system conda && \ echo ". /opt/conda/etc/profile.d/conda.sh" >> /home/conda/.profile && \ echo "conda activate base" >> /home/conda/.profile - USER ${CONDA_UID} SHELL ["/bin/bash", "-l", "-c"] WORKDIR /home/conda/ @@ -47,10 +55,12 @@ RUN curl -sL https://github.com/ASFHyP3/back-projection/archive/refs/tags/v${BAC rm ./back-projection.tar.gz && \ rm -rf ./back-projection/fft -COPY --chown=${CONDA_UID}:${CONDA_GID} ./scripts/build_proc_cpu.sh ./back-projection +COPY --chown=${CONDA_UID}:${CONDA_GID} ./scripts/build_proc_cpu.sh ./scripts/build_proc_gpu.sh ./back-projection RUN cd /home/conda/back-projection && \ chmod +x ./build_proc_cpu.sh && \ + chmod +x ./build_proc_gpu.sh && \ ./build_proc_cpu.sh && \ + if [[ $arg == "yes" ]] ; echo "Skipping GPU build..." ; ./build_proc_cpu.sh && \ cd /home/conda/ COPY --chown=${CONDA_UID}:${CONDA_GID} . /hyp3-back-projection/ diff --git a/README.md b/README.md index 9f19547..701519c 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,8 @@ # HyP3 back-projection HyP3 plugin for back-projection processing + +### GPU Setup: +In order for Docker to be able to use the host's GPU, the host must have the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/index.html) installed and configured. +The process is different for different OS's and Linux distros. The setup process for the most common distros, including Ubuntu, +can be found [here](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#configuration). Make sure to follow the [Docker configuration steps](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#configuration) after installing the package. 
**This process is not necessary when running in AWS while using an EC2 image made with GPU support.** \ No newline at end of file diff --git a/scripts/build_proc_cpu.sh b/scripts/build_proc_cpu.sh index e5bed5e..8578e68 100644 --- a/scripts/build_proc_cpu.sh +++ b/scripts/build_proc_cpu.sh @@ -19,10 +19,6 @@ gfortran -c processsubcpu.f90 backprojectcpusub.f90 bounds.f90 orbitrangetime.f9 gcc -o sentinel_raw_process_cpu sentinel_raw_process_cpu.o decode_line_memory.o processsubcpu.o backprojectcpusub.o azimuth_compress_cpu.o bounds.o orbitrangetime.o latlon.o intp_orbit.o radar_to_xyz.o unitvec.o tcnbasis.o curvature.o cross.o orbithermite.o filelen.o io.o sentineltimingsub.o getburststatevectors.o $FFTW_LIB -lgfortran -lgomp -lm -lrt -lpthread echo 'built sentinel_raw_process_cpu' -# nvcc -o howmanygpus howmanygpus.cu -# -# echo 'built howmanygpus' - cd geo2rdr gfortran -o estimatebaseline estimatebaseline.f90 intp_orbit.f90 latlon.f90 orbithermite.f -ffixed-line-length-none @@ -79,20 +75,12 @@ cd .. echo 'built snaphu' -# nvcc -o gpu_arch gpu_arch.cu -# echo 'built gpu architecture probe' -# -# ./gpu_arch | cat > GPU_ARCH; source ./GPU_ARCH; rm GPU_ARCH - cd sentinel gcc -c filelen.c io.c sentinel_raw_process.c decode_line_memory.c -lm -fopenmp echo 'built raw_process components in sentinel' -# nvcc -gencode arch=compute_$GPU_ARCH,code=sm_$GPU_ARCH -c azimuth_compress.cu -Wno-deprecated-gpu-targets - gfortran -c processsub.f90 backprojectgpusub.f90 bounds.f90 orbitrangetime.f90 latlon.f90 intp_orbit.f90 radar_to_xyz.f90 unitvec.f90 tcnbasis.f90 curvature.f90 cross.f90 orbithermite.f sentineltimingsub.f90 getburststatevectors.f90 -ffixed-line-length-none -fopenmp -# nvcc -o sentinel_raw_process sentinel_raw_process.o decode_line_memory.o processsub.o backprojectgpusub.o azimuth_compress.o bounds.o orbitrangetime.o latlon.o intp_orbit.o radar_to_xyz.o unitvec.o tcnbasis.o curvature.o cross.o orbithermite.o filelen.o io.o sentineltimingsub.o getburststatevectors.o $FFTW_LIB -lstdc++ -lgfortran -lgomp cd .. diff --git a/scripts/build_proc_gpu.sh b/scripts/build_proc_gpu.sh new file mode 100644 index 0000000..5f99e78 --- /dev/null +++ b/scripts/build_proc_gpu.sh @@ -0,0 +1,98 @@ +#!/bin/bash + +MULTIARCH_DIR=/usr/lib/$(gcc -print-multiarch) +FFTW_LIB=$MULTIARCH_DIR/libfftw3f.a +echo 'using FFTW library:' $FFTW_LIB + +# Works +cd DEM +gfortran -o mosaicDEM mosaicDEM.f90 +gfortran -o createspecialdem createspecialdem.f90 +gfortran -o geoid2008_ellipsoid_interpolate geoid2008_ellipsoid_interpolate.f90 +cd .. 
+ +cd sentinel +gfortran -o createslc createslc.f90 +gcc -c azimuth_compress_cpu.c -lm -fopenmp +gcc -c filelen.c io.c sentinel_raw_process_cpu.c decode_line_memory.c -lm -fopenmp +gfortran -c processsubcpu.f90 backprojectcpusub.f90 bounds.f90 orbitrangetime.f90 latlon.f90 intp_orbit.f90 radar_to_xyz.f90 unitvec.f90 tcnbasis.f90 curvature.f90 cross.f90 orbithermite.f sentineltimingsub.f90 getburststatevectors.f90 -ffixed-line-length-none -fopenmp +gcc -o sentinel_raw_process_cpu sentinel_raw_process_cpu.o decode_line_memory.o processsubcpu.o backprojectcpusub.o azimuth_compress_cpu.o bounds.o orbitrangetime.o latlon.o intp_orbit.o radar_to_xyz.o unitvec.o tcnbasis.o curvature.o cross.o orbithermite.o filelen.o io.o sentineltimingsub.o getburststatevectors.o $FFTW_LIB -lgfortran -lgomp -lm -lrt -lpthread +echo 'built sentinel_raw_process_cpu' + +nvcc -o howmanygpus howmanygpus.cu + +echo 'built howmanygpus' + +cd geo2rdr +gfortran -o estimatebaseline estimatebaseline.f90 intp_orbit.f90 latlon.f90 orbithermite.f -ffixed-line-length-none + +echo 'finished with sentinel directory' + +cd ../.. +cd util +gcc -c io.c +gfortran -o nbymi2 nbymi2.f io.o +gfortran -c lsq.f90 +gfortran -o regressheight regressheight.f90 lsq.o + +echo 'finished with util directory' + +cd .. +cd int +gfortran -o findrefpoints findrefpoints.f90 +gfortran -o refpointsfromsim refpointsfromsim.f90 + +echo 'built findrefpoints' + +gcc -c ../util/filelen.c + +echo 'compiled filelen' + +gfortran -o crossmul crossmul.f90 filelen.o $FFTW_LIB -fopenmp -lrt -lpthread + +echo 'built crossmul' + +gfortran -o makecc makecc.f90 filelen.o -fopenmp -lrt -lpthread + +echo 'finished with int directory' + +cd .. +cd sbas +gfortran -o sbas sbas.f90 svd.f90 -fopenmp -lrt -lpthread +echo 'built sbas in sbas directory' + +cd .. +cd ps +gfortran -o cosine_sim cosine_sim.f90 -fopenmp $FFTW_LIB +gfortran -o psinterp psinterp.f90 -fopenmp +echo 'Built cosine_sim and psinterp in ps directory' + +cd .. +tar xf snaphu_v2_0b0_0_0.tar +cd snaphu_v2.0b0.0.0/src +make CFLAGS=-O3 -s + +cd .. +if [ -e $PROC_HOME/bin ]; then echo "copying snaphu"; else mkdir $PROC_HOME/bin; fi +cp bin/snaphu $PROC_HOME/bin/snaphu +cd .. + +echo 'built snaphu' + +# nvcc -o gpu_arch gpu_arch.cu +# echo 'built gpu architecture probe' + +# ./gpu_arch | cat > GPU_ARCH; source ./GPU_ARCH; rm GPU_ARCH + +cd sentinel + +gcc -c filelen.c io.c sentinel_raw_process.c decode_line_memory.c -lm -fopenmp + +echo 'built raw_process components in sentinel' + +nvcc -gencode arch=compute_89,code=sm_89 -c azimuth_compress.cu -Wno-deprecated-gpu-targets + +gfortran -c processsub.f90 backprojectgpusub.f90 bounds.f90 orbitrangetime.f90 latlon.f90 intp_orbit.f90 radar_to_xyz.f90 unitvec.f90 tcnbasis.f90 curvature.f90 cross.f90 orbithermite.f sentineltimingsub.f90 getburststatevectors.f90 -ffixed-line-length-none -fopenmp + +nvcc -o sentinel_raw_process sentinel_raw_process.o decode_line_memory.o processsub.o backprojectgpusub.o azimuth_compress.o bounds.o orbitrangetime.o latlon.o intp_orbit.o radar_to_xyz.o unitvec.o tcnbasis.o curvature.o cross.o orbithermite.o filelen.o io.o sentineltimingsub.o getburststatevectors.o $FFTW_LIB -lstdc++ -lgfortran -lgomp +cd .. 
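The GPU setup section added to the README above can be smoke-tested once the NVIDIA Container Toolkit is installed and configured on the host. This is a sketch rather than part of the patch series; any CUDA-enabled image works, and the tag below simply matches the CUDA 12.4.1 toolchain these patches target:

    # If the NVIDIA Container Toolkit is wired up correctly, nvidia-smi run
    # inside a throwaway container will report the host GPU.
    docker run --rm --gpus all nvidia/cuda:12.4.1-devel-ubuntu20.04 nvidia-smi

If the command prints the usual nvidia-smi device table, containers built from the Dockerfile above will be able to see the GPU as well.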
From ae59337bdef8948b2ee0239216e6ea21f0d355d9 Mon Sep 17 00:00:00 2001 From: Andrew Player Date: Mon, 29 Apr 2024 16:21:00 -0500 Subject: [PATCH 02/23] Move CUDA installation to script for conditional install --- Dockerfile | 30 ++++++++++++++++-------------- scripts/install_cuda.sh | 15 +++++++++++++++ 2 files changed, 31 insertions(+), 14 deletions(-) create mode 100644 scripts/install_cuda.sh diff --git a/Dockerfile b/Dockerfile index a5de4d2..1a5f0c8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,22 +18,14 @@ LABEL org.opencontainers.image.documentation="https://hyp3-docs.asf.alaska.edu" ARG DEBIAN_FRONTEND=noninteractive +ENV USEGPU="yes" ENV PYTHONDONTWRITEBYTECODE=true ENV PROC_HOME=/home/conda/back-projection ENV MYHOME=/home/conda -RUN apt-get update && apt-get install -y --no-install-recommends unzip vim curl build-essential gfortran libfftw3-dev nvidia-driver-535 software-properties-common && \ +RUN apt-get update && apt-get install -y --no-install-recommends unzip vim curl build-essential gfortran libfftw3-dev nvidia-driver-535 && \ apt-get clean && rm -rf /var/lib/apt/lists/* -RUN wget https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda-repo-debian12-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ - dpkg -i cuda-repo-debian12-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ - cp /var/cuda-repo-debian12-12-4-local/cuda-*-keyring.gpg /usr/share/keyrings/ && \ - add-apt-repository contrib && \ - apt-get update && \ - apt-get -y install cuda-toolkit-12-4 && \ - nvcc --version - -ARG USE_GPU=yes ARG CONDA_UID=1000 ARG CONDA_GID=1000 ARG BACK_PROJECTION_TAG=0.2.0 @@ -45,22 +37,32 @@ RUN groupadd -g "${CONDA_GID}" --system conda && \ echo ". /opt/conda/etc/profile.d/conda.sh" >> /home/conda/.profile && \ echo "conda activate base" >> /home/conda/.profile -USER ${CONDA_UID} SHELL ["/bin/bash", "-l", "-c"] + +COPY ./scripts/install_cuda.sh ./ +RUN chmod +x ./install_cuda.sh +RUN if [[ $USEGPU == "yes" ]] ; then ./install_cuda.sh ; else echo "Skipping CUDA install..." ; fi + +USER ${CONDA_UID} WORKDIR /home/conda/ +RUN if [[ $USEGPU == "yes" ]] ; then \ + echo "export PATH="/usr/local/cuda-12.4/bin:$PATH"" >> .bashrc && \ + echo "export LD_LIBRARY_PATH="/usr/local/cuda-12.4/lib64:$LD_LIBRARY_PATH"" >> .bashrc \ + ; else echo "Skipping exporting CUDA path." ; fi + RUN curl -sL https://github.com/ASFHyP3/back-projection/archive/refs/tags/v${BACK_PROJECTION_TAG}.tar.gz > ./back-projection.tar.gz && \ mkdir -p ./back-projection && \ tar -xvf ./back-projection.tar.gz -C ./back-projection/ --strip=1 && \ rm ./back-projection.tar.gz && \ rm -rf ./back-projection/fft -COPY --chown=${CONDA_UID}:${CONDA_GID} ./scripts/build_proc_cpu.sh ./scripts/build_proc_gpu.sh ./back-projection +COPY --chown=${CONDA_UID}:${CONDA_GID} ./scripts/build_proc_cpu.sh ./back-projection +COPY --chown=${CONDA_UID}:${CONDA_GID} ./scripts/build_proc_gpu.sh ./back-projection RUN cd /home/conda/back-projection && \ chmod +x ./build_proc_cpu.sh && \ chmod +x ./build_proc_gpu.sh && \ - ./build_proc_cpu.sh && \ - if [[ $arg == "yes" ]] ; echo "Skipping GPU build..." ; ./build_proc_cpu.sh && \ + if [[ $USEGPU == "yes" ]] ; then ./build_proc_cpu.sh ; else ./build_proc_cpu.sh ; fi && \ cd /home/conda/ COPY --chown=${CONDA_UID}:${CONDA_GID} . /hyp3-back-projection/ diff --git a/scripts/install_cuda.sh b/scripts/install_cuda.sh new file mode 100644 index 0000000..1aceecd --- /dev/null +++ b/scripts/install_cuda.sh @@ -0,0 +1,15 @@ +# Script to install nvidia drivers and the nvidia cuda toolkit. 
+
+# Install Cuda Toolkit 12.4
+# Source: https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=20.04&target_type=deb_local
+wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin && \
+mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600 && \
+wget https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda-repo-ubuntu2004-12-4-local_12.4.1-550.54.15-1_amd64.deb && \
+dpkg -i cuda-repo-ubuntu2004-12-4-local_12.4.1-550.54.15-1_amd64.deb && \
+cp /var/cuda-repo-ubuntu2004-12-4-local/cuda-*-keyring.gpg /usr/share/keyrings/ && \
+apt-get update && \
+apt-get -y install cuda-toolkit-12-4
+
+# Add Cuda Toolkit binaries to the Path.
+echo "export PATH="/usr/local/cuda-12.4/bin:$PATH"" >> .bashrc
+echo "export LD_LIBRARY_PATH="/usr/local/cuda-12.4/lib64:$LD_LIBRARY_PATH"" >> .bashrc
\ No newline at end of file

From f697e193a162e49137596799733224a368846a3d Mon Sep 17 00:00:00 2001
From: Andrew Player
Date: Mon, 29 Apr 2024 16:24:33 -0500
Subject: [PATCH 03/23] removed path export

---
 scripts/install_cuda.sh | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/scripts/install_cuda.sh b/scripts/install_cuda.sh
index 1aceecd..2102f9e 100644
--- a/scripts/install_cuda.sh
+++ b/scripts/install_cuda.sh
@@ -9,7 +9,3 @@ dpkg -i cuda-repo-ubuntu2004-12-4-local_12.4.1-550.54.15-1_amd64.deb && \
 cp /var/cuda-repo-ubuntu2004-12-4-local/cuda-*-keyring.gpg /usr/share/keyrings/ && \
 apt-get update && \
 apt-get -y install cuda-toolkit-12-4
-
-# Add Cuda Toolkit binaries to the Path.
-echo "export PATH="/usr/local/cuda-12.4/bin:$PATH"" >> .bashrc
-echo "export LD_LIBRARY_PATH="/usr/local/cuda-12.4/lib64:$LD_LIBRARY_PATH"" >> .bashrc
\ No newline at end of file

From 84255e8ed0301a78c7be21588223013b060e0170 Mon Sep 17 00:00:00 2001
From: Andrew Player
Date: Mon, 29 Apr 2024 16:28:30 -0500
Subject: [PATCH 04/23] removed gpuarch code (won't work during docker build)

---
 scripts/build_proc_gpu.sh | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/scripts/build_proc_gpu.sh b/scripts/build_proc_gpu.sh
index 5f99e78..7c3a6c2 100644
--- a/scripts/build_proc_gpu.sh
+++ b/scripts/build_proc_gpu.sh
@@ -79,11 +79,6 @@ cd ..

 echo 'built snaphu'

-# nvcc -o gpu_arch gpu_arch.cu
-# echo 'built gpu architecture probe'
-
-# ./gpu_arch | cat > GPU_ARCH; source ./GPU_ARCH; rm GPU_ARCH
-
 cd sentinel

 gcc -c filelen.c io.c sentinel_raw_process.c decode_line_memory.c -lm -fopenmp

 echo 'built raw_process components in sentinel'

From de985361e8e314e81ff9556165aec587c8f182a2 Mon Sep 17 00:00:00 2001
From: Andrew Player
Date: Mon, 29 Apr 2024 16:29:29 -0500
Subject: [PATCH 05/23] updated changelog

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 68e8298..f9493f4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [PEP 440](https://www.python.org/dev/peps/pep-0440/)
 and uses [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+## [0.4.0]
+
+### Added
+* Support for GPU-accelerated processing using CUDA.
## [0.3.0] From f8c4210e126f64dca1f67701e1e38a3a757b69d9 Mon Sep 17 00:00:00 2001 From: Andrew Player Date: Tue, 30 Apr 2024 12:28:16 -0500 Subject: [PATCH 06/23] change things to not use gpu by default --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 1a5f0c8..fe9a29b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,7 +18,7 @@ LABEL org.opencontainers.image.documentation="https://hyp3-docs.asf.alaska.edu" ARG DEBIAN_FRONTEND=noninteractive -ENV USEGPU="yes" +ENV USEGPU="no" ENV PYTHONDONTWRITEBYTECODE=true ENV PROC_HOME=/home/conda/back-projection ENV MYHOME=/home/conda From 53f4a367f54fdab0cb5fed7785abc40222b6828d Mon Sep 17 00:00:00 2001 From: Forrest Williams Date: Tue, 7 May 2024 08:55:23 -0500 Subject: [PATCH 07/23] combine cpu/gpu build proc --- Dockerfile | 15 ++-- scripts/{build_proc_cpu.sh => build_proc.sh} | 13 +++ scripts/build_proc_gpu.sh | 93 -------------------- src/hyp3_back_projection/back_projection.py | 17 ++-- 4 files changed, 32 insertions(+), 106 deletions(-) rename scripts/{build_proc_cpu.sh => build_proc.sh} (82%) delete mode 100644 scripts/build_proc_gpu.sh diff --git a/Dockerfile b/Dockerfile index 2f23a3c..8856b7f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,7 +18,9 @@ LABEL org.opencontainers.image.documentation="https://hyp3-docs.asf.alaska.edu" ARG DEBIAN_FRONTEND=noninteractive -ENV USEGPU="no" +ARG USEGPU="false" +ENV USEGPU=${USEGPU} + ENV PYTHONDONTWRITEBYTECODE=true ENV PROC_HOME=/home/conda/back-projection ENV MYHOME=/home/conda @@ -46,7 +48,7 @@ RUN if [[ $USEGPU == "yes" ]] ; then ./install_cuda.sh ; else echo "Skipping CUD USER ${CONDA_UID} WORKDIR /home/conda/ -RUN if [[ $USEGPU == "yes" ]] ; then \ +RUN if [[ $USEGPU == "true" ]] ; then \ echo "export PATH="/usr/local/cuda-12.4/bin:$PATH"" >> .bashrc && \ echo "export LD_LIBRARY_PATH="/usr/local/cuda-12.4/lib64:$LD_LIBRARY_PATH"" >> .bashrc \ ; else echo "Skipping exporting CUDA path." 
; fi @@ -57,13 +59,10 @@ RUN curl -sL https://github.com/ASFHyP3/back-projection/archive/refs/tags/v${BAC rm ./back-projection.tar.gz && \ rm -rf ./back-projection/fft -COPY --chown=${CONDA_UID}:${CONDA_GID} ./scripts/build_proc_cpu.sh ./back-projection -COPY --chown=${CONDA_UID}:${CONDA_GID} ./scripts/build_proc_gpu.sh ./back-projection +COPY --chown=${CONDA_UID}:${CONDA_GID} ./scripts/build_proc.sh ./back-projection RUN cd /home/conda/back-projection && \ - chmod +x ./build_proc_cpu.sh && \ - chmod +x ./build_proc_gpu.sh && \ - if [[ $USEGPU == "yes" ]] ; then ./build_proc_cpu.sh ; else ./build_proc_cpu.sh ; fi && \ - ./build_proc_cpu.sh && \ + chmod +x ./build_proc.sh && \ + ./build_proc.sh && \ find $PROC_HOME -type f -name "*.py" -exec chmod +x {} + && \ cd /home/conda/ diff --git a/scripts/build_proc_cpu.sh b/scripts/build_proc.sh similarity index 82% rename from scripts/build_proc_cpu.sh rename to scripts/build_proc.sh index ef80fc2..f3cfae1 100644 --- a/scripts/build_proc_cpu.sh +++ b/scripts/build_proc.sh @@ -19,6 +19,11 @@ gfortran -c processsubcpu.f90 backprojectcpusub.f90 bounds.f90 orbitrangetime.f9 gcc -o sentinel_raw_process_cpu sentinel_raw_process_cpu.o decode_line_memory.o processsubcpu.o backprojectcpusub.o azimuth_compress_cpu.o bounds.o orbitrangetime.o latlon.o intp_orbit.o radar_to_xyz.o unitvec.o tcnbasis.o curvature.o cross.o orbithermite.o filelen.o io.o sentineltimingsub.o getburststatevectors.o $FFTW_LIB -lgfortran -lgomp -lm -lrt -lpthread echo 'built sentinel_raw_process_cpu' +if [[ "$USEGPU" == "true" ]]; then + nvcc -o howmanygpus howmanygpus.cu + echo 'built howmanygpus' +fi + cd geo2rdr gfortran -o estimatebaseline estimatebaseline.f90 intp_orbit.f90 latlon.f90 orbithermite.f -ffixed-line-length-none @@ -82,6 +87,14 @@ gcc -c filelen.c io.c sentinel_raw_process.c decode_line_memory.c -lm -fopenmp echo 'built raw_process components in sentinel' +if [[ "$USEGPU" == "true" ]]; then + nvcc -gencode arch=compute_89,code=sm_89 -c azimuth_compress.cu -Wno-deprecated-gpu-targets +fi + gfortran -c processsub.f90 backprojectgpusub.f90 bounds.f90 orbitrangetime.f90 latlon.f90 intp_orbit.f90 radar_to_xyz.f90 unitvec.f90 tcnbasis.f90 curvature.f90 cross.f90 orbithermite.f sentineltimingsub.f90 getburststatevectors.f90 -ffixed-line-length-none -fopenmp +if [[ "$USEGPU" == "true" ]]; then + nvcc -o sentinel_raw_process sentinel_raw_process.o decode_line_memory.o processsub.o backprojectgpusub.o azimuth_compress.o bounds.o orbitrangetime.o latlon.o intp_orbit.o radar_to_xyz.o unitvec.o tcnbasis.o curvature.o cross.o orbithermite.o filelen.o io.o sentineltimingsub.o getburststatevectors.o $FFTW_LIB -lstdc++ -lgfortran -lgomp +fi + cd .. diff --git a/scripts/build_proc_gpu.sh b/scripts/build_proc_gpu.sh deleted file mode 100644 index 7c3a6c2..0000000 --- a/scripts/build_proc_gpu.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/bash - -MULTIARCH_DIR=/usr/lib/$(gcc -print-multiarch) -FFTW_LIB=$MULTIARCH_DIR/libfftw3f.a -echo 'using FFTW library:' $FFTW_LIB - -# Works -cd DEM -gfortran -o mosaicDEM mosaicDEM.f90 -gfortran -o createspecialdem createspecialdem.f90 -gfortran -o geoid2008_ellipsoid_interpolate geoid2008_ellipsoid_interpolate.f90 -cd .. 
- -cd sentinel -gfortran -o createslc createslc.f90 -gcc -c azimuth_compress_cpu.c -lm -fopenmp -gcc -c filelen.c io.c sentinel_raw_process_cpu.c decode_line_memory.c -lm -fopenmp -gfortran -c processsubcpu.f90 backprojectcpusub.f90 bounds.f90 orbitrangetime.f90 latlon.f90 intp_orbit.f90 radar_to_xyz.f90 unitvec.f90 tcnbasis.f90 curvature.f90 cross.f90 orbithermite.f sentineltimingsub.f90 getburststatevectors.f90 -ffixed-line-length-none -fopenmp -gcc -o sentinel_raw_process_cpu sentinel_raw_process_cpu.o decode_line_memory.o processsubcpu.o backprojectcpusub.o azimuth_compress_cpu.o bounds.o orbitrangetime.o latlon.o intp_orbit.o radar_to_xyz.o unitvec.o tcnbasis.o curvature.o cross.o orbithermite.o filelen.o io.o sentineltimingsub.o getburststatevectors.o $FFTW_LIB -lgfortran -lgomp -lm -lrt -lpthread -echo 'built sentinel_raw_process_cpu' - -nvcc -o howmanygpus howmanygpus.cu - -echo 'built howmanygpus' - -cd geo2rdr -gfortran -o estimatebaseline estimatebaseline.f90 intp_orbit.f90 latlon.f90 orbithermite.f -ffixed-line-length-none - -echo 'finished with sentinel directory' - -cd ../.. -cd util -gcc -c io.c -gfortran -o nbymi2 nbymi2.f io.o -gfortran -c lsq.f90 -gfortran -o regressheight regressheight.f90 lsq.o - -echo 'finished with util directory' - -cd .. -cd int -gfortran -o findrefpoints findrefpoints.f90 -gfortran -o refpointsfromsim refpointsfromsim.f90 - -echo 'built findrefpoints' - -gcc -c ../util/filelen.c - -echo 'compiled filelen' - -gfortran -o crossmul crossmul.f90 filelen.o $FFTW_LIB -fopenmp -lrt -lpthread - -echo 'built crossmul' - -gfortran -o makecc makecc.f90 filelen.o -fopenmp -lrt -lpthread - -echo 'finished with int directory' - -cd .. -cd sbas -gfortran -o sbas sbas.f90 svd.f90 -fopenmp -lrt -lpthread -echo 'built sbas in sbas directory' - -cd .. -cd ps -gfortran -o cosine_sim cosine_sim.f90 -fopenmp $FFTW_LIB -gfortran -o psinterp psinterp.f90 -fopenmp -echo 'Built cosine_sim and psinterp in ps directory' - -cd .. -tar xf snaphu_v2_0b0_0_0.tar -cd snaphu_v2.0b0.0.0/src -make CFLAGS=-O3 -s - -cd .. -if [ -e $PROC_HOME/bin ]; then echo "copying snaphu"; else mkdir $PROC_HOME/bin; fi -cp bin/snaphu $PROC_HOME/bin/snaphu -cd .. - -echo 'built snaphu' - -cd sentinel - -gcc -c filelen.c io.c sentinel_raw_process.c decode_line_memory.c -lm -fopenmp - -echo 'built raw_process components in sentinel' - -nvcc -gencode arch=compute_89,code=sm_89 -c azimuth_compress.cu -Wno-deprecated-gpu-targets - -gfortran -c processsub.f90 backprojectgpusub.f90 bounds.f90 orbitrangetime.f90 latlon.f90 intp_orbit.f90 radar_to_xyz.f90 unitvec.f90 tcnbasis.f90 curvature.f90 cross.f90 orbithermite.f sentineltimingsub.f90 getburststatevectors.f90 -ffixed-line-length-none -fopenmp - -nvcc -o sentinel_raw_process sentinel_raw_process.o decode_line_memory.o processsub.o backprojectgpusub.o azimuth_compress.o bounds.o orbitrangetime.o latlon.o intp_orbit.o radar_to_xyz.o unitvec.o tcnbasis.o curvature.o cross.o orbithermite.o filelen.o io.o sentineltimingsub.o getburststatevectors.o $FFTW_LIB -lstdc++ -lgfortran -lgomp -cd .. 
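The combined build script keeps the hard-coded "nvcc -gencode arch=compute_89,code=sm_89" flags, i.e. compute capability 8.9 (Ada-generation hardware such as the NVIDIA L4). When building for other GPUs, the correct value can be read off the build host first; a hedged example, assuming a driver recent enough to support the compute_cap query field:

    # Print each visible GPU's compute capability (e.g. "8.9") so matching
    # values can be substituted into nvcc's -gencode flags.
    nvidia-smi --query-gpu=compute_cap --format=csv,noheader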
diff --git a/src/hyp3_back_projection/back_projection.py b/src/hyp3_back_projection/back_projection.py index cbbe7f2..d9b43ab 100644 --- a/src/hyp3_back_projection/back_projection.py +++ b/src/hyp3_back_projection/back_projection.py @@ -9,6 +9,7 @@ from typing import Iterable, Optional from hyp3lib.aws import upload_file_to_s3 +from hyp3lib.util import string_is_true from shapely import unary_union from hyp3_back_projection import dem, utils @@ -30,26 +31,29 @@ def create_param_file(dem_path: Path, dem_rsc_path: Path, output_dir: Path): f.write('\n'.join(lines)) -def back_project_single_granule(granule_path: Path, orbit_path: Path, work_dir: Path) -> None: +def back_project_single_granule(granule_path: Path, orbit_path: Path, work_dir: Path, gpu: bool = False) -> None: """Back-project a single Sentinel-1 level-0 granule. Args: granule_path: Path to the granule to back-project orbit_path: Path to the orbit file for the granule + work_dir: Working directory for processing + gpu: Use the GPU-based version of the workflow, defaults to False """ required_files = ['elevation.dem', 'elevation.dem.rsc', 'params'] for file in required_files: if not (work_dir / file).exists(): raise FileNotFoundError(f'Missing required file: {file}') + script = 'sentinel/sentinel_scene_gpu.py' if gpu else 'sentinel/sentinel_scene_cpu.py' args = [str(granule_path.with_suffix('')), str(orbit_path)] - utils.call_stanford_module('sentinel/sentinel_scene_cpu.py', args, work_dir=work_dir) + utils.call_stanford_module(script, args, work_dir=work_dir) patterns = ['*hgt*', 'dem*', 'DEM*', 'q*', '*positionburst*'] for pattern in patterns: [f.unlink() for f in work_dir.glob(pattern)] -def create_product(work_dir): +def create_product(work_dir) -> Path: """Create a product zip file. Includes files needed for further processing (gslc, orbit, and parameter file). @@ -88,7 +92,8 @@ def back_project( bucket: str = None, bucket_prefix: str = '', work_dir: Optional[Path] = None, -) -> Path: + gpu: bool = False, +): """Back-project a set of Sentinel-1 level-0 granules. 
Args: @@ -100,6 +105,7 @@ def back_project( bucket: AWS S3 bucket for uploading the final product(s) bucket_prefix: Add a bucket prefix to the product(s) work_dir: Working directory for processing + gpu: Use the GPU-based version of the workflow """ utils.set_creds('EARTHDATA', earthdata_username, earthdata_password) utils.set_creds('ESA', esa_username, esa_password) @@ -120,7 +126,7 @@ def back_project( create_param_file(dem_path, dem_path.with_suffix('.dem.rsc'), work_dir) for granule_path, orbit_path in back_project_args: - back_project_single_granule(granule_path, orbit_path, work_dir=work_dir) + back_project_single_granule(granule_path, orbit_path, work_dir=work_dir, gpu=gpu) utils.call_stanford_module('util/merge_slcs.py', work_dir=work_dir) @@ -146,6 +152,7 @@ def main(): parser.add_argument('--esa-password', default=None, help="Password for ESA's Copernicus Data Space Ecosystem") parser.add_argument('--bucket', help='AWS S3 bucket HyP3 for upload the final product(s)') parser.add_argument('--bucket-prefix', default='', help='Add a bucket prefix to product(s)') + parser.add_argument('--gpu', type=string_is_true, default=False, help='Use the GPU-based version of the workflow.') parser.add_argument('granules', nargs='+', help='Level-0 S1 granule to back-project.') args = parser.parse_args() From 1ca96e44baf90592256be5edbfeccd3f6f512a5a Mon Sep 17 00:00:00 2001 From: Forrest Williams Date: Tue, 7 May 2024 08:58:43 -0500 Subject: [PATCH 08/23] correct script name --- src/hyp3_back_projection/back_projection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hyp3_back_projection/back_projection.py b/src/hyp3_back_projection/back_projection.py index d9b43ab..10a1700 100644 --- a/src/hyp3_back_projection/back_projection.py +++ b/src/hyp3_back_projection/back_projection.py @@ -45,7 +45,7 @@ def back_project_single_granule(granule_path: Path, orbit_path: Path, work_dir: if not (work_dir / file).exists(): raise FileNotFoundError(f'Missing required file: {file}') - script = 'sentinel/sentinel_scene_gpu.py' if gpu else 'sentinel/sentinel_scene_cpu.py' + script = 'sentinel/sentinel_scene_multigpu.py' if gpu else 'sentinel/sentinel_scene_cpu.py' args = [str(granule_path.with_suffix('')), str(orbit_path)] utils.call_stanford_module(script, args, work_dir=work_dir) patterns = ['*hgt*', 'dem*', 'DEM*', 'q*', '*positionburst*'] From 9e9b0ecb5b247caf27b31adc2bd5fc0a16b9bf02 Mon Sep 17 00:00:00 2001 From: Forrest Williams Date: Tue, 7 May 2024 09:40:38 -0500 Subject: [PATCH 09/23] update arg logic --- src/hyp3_back_projection/back_projection.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/hyp3_back_projection/back_projection.py b/src/hyp3_back_projection/back_projection.py index 10a1700..32a7119 100644 --- a/src/hyp3_back_projection/back_projection.py +++ b/src/hyp3_back_projection/back_projection.py @@ -9,7 +9,6 @@ from typing import Iterable, Optional from hyp3lib.aws import upload_file_to_s3 -from hyp3lib.util import string_is_true from shapely import unary_union from hyp3_back_projection import dem, utils @@ -152,7 +151,7 @@ def main(): parser.add_argument('--esa-password', default=None, help="Password for ESA's Copernicus Data Space Ecosystem") parser.add_argument('--bucket', help='AWS S3 bucket HyP3 for upload the final product(s)') parser.add_argument('--bucket-prefix', default='', help='Add a bucket prefix to product(s)') - parser.add_argument('--gpu', type=string_is_true, default=False, help='Use the GPU-based version of the workflow.') + 
parser.add_argument('--gpu', default=False, action='store_true', help='Use the GPU-based version of the workflow.') parser.add_argument('granules', nargs='+', help='Level-0 S1 granule to back-project.') args = parser.parse_args() From 2d9cb3eee04b546157bcab2227ddf45a9ae0b4be Mon Sep 17 00:00:00 2001 From: Andrew Player Date: Wed, 8 May 2024 15:27:15 -0500 Subject: [PATCH 10/23] changed from yes to true --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 8856b7f..bad764b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,7 +18,7 @@ LABEL org.opencontainers.image.documentation="https://hyp3-docs.asf.alaska.edu" ARG DEBIAN_FRONTEND=noninteractive -ARG USEGPU="false" +ARG USEGPU="true" ENV USEGPU=${USEGPU} ENV PYTHONDONTWRITEBYTECODE=true @@ -43,7 +43,7 @@ SHELL ["/bin/bash", "-l", "-c"] COPY ./scripts/install_cuda.sh ./ RUN chmod +x ./install_cuda.sh -RUN if [[ $USEGPU == "yes" ]] ; then ./install_cuda.sh ; else echo "Skipping CUDA install..." ; fi +RUN if [[ $USEGPU == "true" ]] ; then ./install_cuda.sh ; else echo "Skipping CUDA install..." ; fi USER ${CONDA_UID} WORKDIR /home/conda/ From 1cd89b3b7fe52587711d3e7330a89b76734408ef Mon Sep 17 00:00:00 2001 From: Andrew Player Date: Wed, 8 May 2024 15:42:03 -0500 Subject: [PATCH 11/23] make gpu not default --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index bad764b..82d5bb8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,7 +18,7 @@ LABEL org.opencontainers.image.documentation="https://hyp3-docs.asf.alaska.edu" ARG DEBIAN_FRONTEND=noninteractive -ARG USEGPU="true" +ARG USEGPU="false" ENV USEGPU=${USEGPU} ENV PYTHONDONTWRITEBYTECODE=true From 2935dfd9546bdc83fadda38e42e20cf1ddd8a028 Mon Sep 17 00:00:00 2001 From: Andrew Player Date: Wed, 8 May 2024 16:02:23 -0500 Subject: [PATCH 12/23] add cuda paths using docker's ENV rather than .bashrc --- Dockerfile | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 82d5bb8..deb94e2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -48,10 +48,8 @@ RUN if [[ $USEGPU == "true" ]] ; then ./install_cuda.sh ; else echo "Skipping CU USER ${CONDA_UID} WORKDIR /home/conda/ -RUN if [[ $USEGPU == "true" ]] ; then \ - echo "export PATH="/usr/local/cuda-12.4/bin:$PATH"" >> .bashrc && \ - echo "export LD_LIBRARY_PATH="/usr/local/cuda-12.4/lib64:$LD_LIBRARY_PATH"" >> .bashrc \ - ; else echo "Skipping exporting CUDA path." 
; fi +ENV PATH="$PATH:/usr/local/cuda-12.4/bin" +ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda-12.4/lib64" RUN curl -sL https://github.com/ASFHyP3/back-projection/archive/refs/tags/v${BACK_PROJECTION_TAG}.tar.gz > ./back-projection.tar.gz && \ mkdir -p ./back-projection && \ From 0cdbcec2076c863b9cfe125585a1eb46cf8d5bab Mon Sep 17 00:00:00 2001 From: Forrest Williams Date: Thu, 9 May 2024 08:51:00 -0500 Subject: [PATCH 13/23] use cuda base image for GPU --- Dockerfile | 14 +------ Dockerfile.gpu | 86 +++++++++++++++++++++++++++++++++++++++++ scripts/build_proc.sh | 4 +- scripts/install_cuda.sh | 11 ------ 4 files changed, 91 insertions(+), 24 deletions(-) create mode 100644 Dockerfile.gpu delete mode 100644 scripts/install_cuda.sh diff --git a/Dockerfile b/Dockerfile index 8856b7f..954d99c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,14 +18,13 @@ LABEL org.opencontainers.image.documentation="https://hyp3-docs.asf.alaska.edu" ARG DEBIAN_FRONTEND=noninteractive -ARG USEGPU="false" -ENV USEGPU=${USEGPU} +ENV USEGPU="false" ENV PYTHONDONTWRITEBYTECODE=true ENV PROC_HOME=/home/conda/back-projection ENV MYHOME=/home/conda -RUN apt-get update && apt-get install -y --no-install-recommends unzip vim curl build-essential gfortran libfftw3-dev nvidia-driver-535 && \ +RUN apt-get update && apt-get install -y --no-install-recommends unzip vim curl build-essential gfortran libfftw3-dev && \ apt-get clean && rm -rf /var/lib/apt/lists/* ARG CONDA_UID=1000 @@ -41,18 +40,9 @@ RUN groupadd -g "${CONDA_GID}" --system conda && \ SHELL ["/bin/bash", "-l", "-c"] -COPY ./scripts/install_cuda.sh ./ -RUN chmod +x ./install_cuda.sh -RUN if [[ $USEGPU == "yes" ]] ; then ./install_cuda.sh ; else echo "Skipping CUDA install..." ; fi - USER ${CONDA_UID} WORKDIR /home/conda/ -RUN if [[ $USEGPU == "true" ]] ; then \ - echo "export PATH="/usr/local/cuda-12.4/bin:$PATH"" >> .bashrc && \ - echo "export LD_LIBRARY_PATH="/usr/local/cuda-12.4/lib64:$LD_LIBRARY_PATH"" >> .bashrc \ - ; else echo "Skipping exporting CUDA path." 
; fi - RUN curl -sL https://github.com/ASFHyP3/back-projection/archive/refs/tags/v${BACK_PROJECTION_TAG}.tar.gz > ./back-projection.tar.gz && \ mkdir -p ./back-projection && \ tar -xvf ./back-projection.tar.gz -C ./back-projection/ --strip=1 && \ diff --git a/Dockerfile.gpu b/Dockerfile.gpu new file mode 100644 index 0000000..9066135 --- /dev/null +++ b/Dockerfile.gpu @@ -0,0 +1,86 @@ +FROM nvidia/cuda:12.4.1-devel-ubuntu20.04 + +# For opencontainers label definitions, see: +# https://github.com/opencontainers/image-spec/blob/master/annotations.md +LABEL org.opencontainers.image.title="HyP3 back-projection" +LABEL org.opencontainers.image.description="HyP3 plugin for back-projection processing" +LABEL org.opencontainers.image.vendor="Alaska Satellite Facility" +LABEL org.opencontainers.image.authors="ASF Tools Team " +LABEL org.opencontainers.image.licenses="BSD-3-Clause" +LABEL org.opencontainers.image.url="https://github.com/ASFHyP3/hyp3-back-projection" +LABEL org.opencontainers.image.source="https://github.com/ASFHyP3/hyp3-back-projection" +LABEL org.opencontainers.image.documentation="https://hyp3-docs.asf.alaska.edu" + +ARG DEBIAN_FRONTEND=noninteractive +ARG CONDA_UID=1000 +ARG CONDA_GID=1000 +ARG BACK_PROJECTION_TAG=0.2.0 +ARG FFTW_TAG=3.3.9 +ARG MINIFORGE_NAME=Miniforge3 +ARG MINIFORGE_VERSION=24.3.0-0 + +# USEGPU environment variable used by build_proc.sh +ENV USEGPU="true" +ENV CONDA_DIR=/opt/conda +ENV LANG=C.UTF-8 LC_ALL=C.UTF-8 +ENV PATH=${CONDA_DIR}/bin:${PATH} +ENV PYTHONDONTWRITEBYTECODE=true +ENV PROC_HOME=/home/conda/back-projection +ENV MYHOME=/home/conda + +# Conda setup +RUN apt-get update > /dev/null && \ + apt-get install --no-install-recommends --yes \ + wget bzip2 ca-certificates \ + git \ + tini \ + > /dev/null && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* && \ + wget --no-hsts --quiet https://github.com/conda-forge/miniforge/releases/download/${MINIFORGE_VERSION}/${MINIFORGE_NAME}-${MINIFORGE_VERSION}-Linux-$(uname -m).sh -O /tmp/miniforge.sh && \ + /bin/bash /tmp/miniforge.sh -b -p ${CONDA_DIR} && \ + rm /tmp/miniforge.sh && \ + conda clean --tarballs --index-cache --packages --yes && \ + find ${CONDA_DIR} -follow -type f -name '*.a' -delete && \ + find ${CONDA_DIR} -follow -type f -name '*.pyc' -delete && \ + conda clean --force-pkgs-dirs --all --yes && \ + echo ". ${CONDA_DIR}/etc/profile.d/conda.sh && conda activate base" >> /etc/skel/.bashrc && \ + echo ". ${CONDA_DIR}/etc/profile.d/conda.sh && conda activate base" >> ~/.bashrc + +RUN apt-get update && apt-get install -y --no-install-recommends unzip vim curl build-essential gfortran libfftw3-dev && \ + apt-get clean && rm -rf /var/lib/apt/lists/* + +RUN groupadd -g "${CONDA_GID}" --system conda && \ + useradd -l -u "${CONDA_UID}" -g "${CONDA_GID}" --system -d /home/conda -m -s /bin/bash conda && \ + chown -R conda:conda /opt && \ + echo ". 
/opt/conda/etc/profile.d/conda.sh" >> /home/conda/.profile && \ + echo "conda activate base" >> /home/conda/.profile + +SHELL ["/bin/bash", "-l", "-c"] + +USER ${CONDA_UID} +WORKDIR /home/conda/ + +RUN curl -sL https://github.com/ASFHyP3/back-projection/archive/refs/tags/v${BACK_PROJECTION_TAG}.tar.gz > ./back-projection.tar.gz && \ + mkdir -p ./back-projection && \ + tar -xvf ./back-projection.tar.gz -C ./back-projection/ --strip=1 && \ + rm ./back-projection.tar.gz && \ + rm -rf ./back-projection/fft + +COPY --chown=${CONDA_UID}:${CONDA_GID} ./scripts/build_proc.sh ./back-projection +RUN cd /home/conda/back-projection && \ + chmod +x ./build_proc.sh && \ + ./build_proc.sh && \ + find $PROC_HOME -type f -name "*.py" -exec chmod +x {} + && \ + cd /home/conda/ + +COPY --chown=${CONDA_UID}:${CONDA_GID} . /hyp3-back-projection/ + +RUN mamba env create -f /hyp3-back-projection/environment.yml && \ + conda clean -afy && \ + conda activate hyp3-back-projection && \ + sed -i 's/conda activate base/conda activate hyp3-back-projection/g' /home/conda/.profile && \ + python -m pip install --no-cache-dir /hyp3-back-projection + +ENTRYPOINT ["/hyp3-back-projection/src/hyp3_back_projection/etc/entrypoint.sh"] +CMD ["-h"] diff --git a/scripts/build_proc.sh b/scripts/build_proc.sh index f3cfae1..08e080e 100644 --- a/scripts/build_proc.sh +++ b/scripts/build_proc.sh @@ -3,8 +3,10 @@ MULTIARCH_DIR=/usr/lib/$(gcc -print-multiarch) FFTW_LIB=$MULTIARCH_DIR/libfftw3f.a echo 'using FFTW library:' $FFTW_LIB +if [[ "$USEGPU" == "true" ]]; then + echo 'building with GPU support' +fi -# Works cd DEM gfortran -o mosaicDEM mosaicDEM.f90 gfortran -o createspecialdem createspecialdem.f90 diff --git a/scripts/install_cuda.sh b/scripts/install_cuda.sh deleted file mode 100644 index 2102f9e..0000000 --- a/scripts/install_cuda.sh +++ /dev/null @@ -1,11 +0,0 @@ -# Script to install nvidia drivers and the nvidia cuda toolkit. 
-
-# Install Cuda Toolkit 12.4
-# Source: https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=20.04&target_type=deb_local
-wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin && \
-mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600 && \
-wget https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda-repo-ubuntu2004-12-4-local_12.4.1-550.54.15-1_amd64.deb && \
-dpkg -i cuda-repo-ubuntu2004-12-4-local_12.4.1-550.54.15-1_amd64.deb && \
-cp /var/cuda-repo-ubuntu2004-12-4-local/cuda-*-keyring.gpg /usr/share/keyrings/ && \
-apt-get update && \
-apt-get -y install cuda-toolkit-12-4

From cf1033ccdd76eff53a4d639acbe56f5a9c30d6f6 Mon Sep 17 00:00:00 2001
From: Forrest Williams
Date: Thu, 9 May 2024 15:37:19 -0500
Subject: [PATCH 14/23] switch to use of process_parallel for GPU case

---
 src/hyp3_back_projection/back_projection.py | 68 +++++++++++++++------
 src/hyp3_back_projection/utils.py | 13 +++-
 tests/test_back_projection.py | 4 +-
 3 files changed, 63 insertions(+), 22 deletions(-)

diff --git a/src/hyp3_back_projection/back_projection.py b/src/hyp3_back_projection/back_projection.py
index 32a7119..4c9211f 100644
--- a/src/hyp3_back_projection/back_projection.py
+++ b/src/hyp3_back_projection/back_projection.py
@@ -30,28 +30,58 @@ def create_param_file(dem_path: Path, dem_rsc_path: Path, output_dir: Path):
         f.write('\n'.join(lines))


-def back_project_single_granule(granule_path: Path, orbit_path: Path, work_dir: Path, gpu: bool = False) -> None:
-    """Back-project a single Sentinel-1 level-0 granule.
-
-    Args:
-        granule_path: Path to the granule to back-project
-        orbit_path: Path to the orbit file for the granule
-        work_dir: Working directory for processing
-        gpu: Use the GPU-based version of the workflow, defaults to False
-    """
-    required_files = ['elevation.dem', 'elevation.dem.rsc', 'params']
+def check_required_files(required_files: Iterable, work_dir: Path) -> None:
     for file in required_files:
         if not (work_dir / file).exists():
             raise FileNotFoundError(f'Missing required file: {file}')

-    script = 'sentinel/sentinel_scene_multigpu.py' if gpu else 'sentinel/sentinel_scene_cpu.py'
-    args = [str(granule_path.with_suffix('')), str(orbit_path)]
-    utils.call_stanford_module(script, args, work_dir=work_dir)
+
+def clean_up_after_back_projection(work_dir: Path) -> None:
     patterns = ['*hgt*', 'dem*', 'DEM*', 'q*', '*positionburst*']
     for pattern in patterns:
         [f.unlink() for f in work_dir.glob(pattern)]


+def back_project_cpu(granule_orbit_pairs: Iterable, work_dir: Path) -> None:
+    """Back-project a set of Sentinel-1 level-0 granules using the CPU-based workflow.
+ + Args: + granule_orbit_pairs: List of tuples of granule and orbit file paths + work_dir: Working directory for processing + """ + check_required_files(['elevation.dem', 'elevation.dem.rsc', 'params']) + + for granule_path, orbit_path in granule_orbit_pairs: + args = [str(granule_path.with_suffix('')), str(orbit_path)] + utils.call_stanford_module('sentinel/sentinel_scene_cpu.py', args, work_dir=work_dir) + + clean_up_after_back_projection(work_dir) + + +def create_zipped_safe_list(granule_paths: Iterable, work_dir: Path): + """Create a list of the zipped granules to process.""" + with open(work_dir / 'ziplist', 'w') as f: + for granule_path in granule_paths: + f.write(f'{granule_path.name}\n') + + +def back_project_gpu(granule_orbit_pairs: Iterable, work_dir: Path) -> None: + """Back-project a set of Sentinel-1 level-0 granules using the GPU-based workflow. + + Args: + granule_orbit_pairs: List of tuples of granule and orbit file paths + work_dir: Working directory for processing + """ + n_gpus = utils.how_many_gpus() + create_zipped_safe_list([x[0] for x in granule_orbit_pairs], work_dir) + + check_required_files(['elevation.dem', 'elevation.dem.rsc', 'params', 'ziplist']) + + utils.call_stanford_module('sentinel/process_parallel.py', ['ziplist', str(n_gpus)], work_dir=work_dir) + + clean_up_after_back_projection(work_dir) + + def create_product(work_dir) -> Path: """Create a product zip file. Includes files needed for further processing (gslc, orbit, and parameter file). @@ -113,19 +143,21 @@ def back_project( print('Downloading data...') bboxs = [] - back_project_args = [] + granule_orbit_pairs = [] for granule in granules: granule_path, granule_bbox = utils.download_raw_granule(granule, work_dir) orbit_path = utils.download_orbit(granule, work_dir) bboxs.append(granule_bbox) - back_project_args.append((granule_path, orbit_path)) + granule_orbit_pairs.append((granule_path, orbit_path)) full_bbox = unary_union(bboxs).buffer(0.1) dem_path = dem.download_dem_for_back_projection(full_bbox, work_dir) create_param_file(dem_path, dem_path.with_suffix('.dem.rsc'), work_dir) - for granule_path, orbit_path in back_project_args: - back_project_single_granule(granule_path, orbit_path, work_dir=work_dir, gpu=gpu) + if gpu: + back_project_gpu(granule_orbit_pairs, work_dir=work_dir) + else: + back_project_cpu(granule_orbit_pairs, work_dir=work_dir) utils.call_stanford_module('util/merge_slcs.py', work_dir=work_dir) @@ -133,7 +165,7 @@ def back_project( zip_path = create_product(work_dir) upload_file_to_s3(zip_path, bucket, bucket_prefix) - print(f'Finish back-projection for {list(work_dir.glob("S1*.geo"))[0].with_suffix("").name}!') + print(f'Finished back-projection for {list(work_dir.glob("S1*.geo"))[0].with_suffix("").name}!') def main(): diff --git a/src/hyp3_back_projection/utils.py b/src/hyp3_back_projection/utils.py index 5408a57..74d6e0a 100644 --- a/src/hyp3_back_projection/utils.py +++ b/src/hyp3_back_projection/utils.py @@ -135,7 +135,7 @@ def get_earthdata_credentials() -> Tuple[str, str]: ) -def download_raw_granule(granule_name: str, output_dir: Path) -> Tuple[Path, Polygon]: +def download_raw_granule(granule_name: str, output_dir: Path, unzip: bool = False) -> Tuple[Path, Polygon]: """Download a S1 granule using asf_search. Return its path and buffered extent. 
@@ -160,7 +160,7 @@ def download_raw_granule(granule_name: str, output_dir: Path) -> Tuple[Path, Pol if not out_path.exists() and not zip_path.exists(): result.download(path=output_dir, session=session) - if not out_path.exists(): + if not out_path.exists() and unzip: with ZipFile(zip_path, 'r') as zip_ref: zip_ref.extractall('.') @@ -201,3 +201,12 @@ def call_stanford_module(local_name, args: List = [], work_dir: Optional[Path] = args = [str(x) for x in args] print(f'Calling {local_name} {" ".join(args)} in directory {work_dir}') subprocess.run([script, *args], cwd=work_dir, check=True) + + +def how_many_gpus(): + """Get the number of GPUs available on the system using Stanford script.""" + cmd = (get_proc_home() / 'sentinel' / 'howmanygpus').resolve() + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) + (param, err) = proc.communicate() + ngpus = int(str(param, 'UTF-8').split()[0]) + return ngpus diff --git a/tests/test_back_projection.py b/tests/test_back_projection.py index e116242..4d5d608 100644 --- a/tests/test_back_projection.py +++ b/tests/test_back_projection.py @@ -24,7 +24,7 @@ def test_back_project_single_granule(tmp_path, monkeypatch): granule_path = tmp_path / 'granule.SAFE' orbit_path = tmp_path / 'orbit.xml' with pytest.raises(FileNotFoundError): - back_projection.back_project_single_granule(granule_path, orbit_path, tmp_path) + back_projection.back_project_cpu(granule_path, orbit_path, tmp_path) for f in ['elevation.dem', 'elevation.dem.rsc', 'params']: (tmp_path / f).touch() @@ -35,7 +35,7 @@ def test_back_project_single_granule(tmp_path, monkeypatch): with monkeypatch.context() as m: mock_call_stanford_module = mock.Mock() m.setattr(utils, 'call_stanford_module', mock_call_stanford_module) - back_projection.back_project_single_granule(granule_path, orbit_path, tmp_path) + back_projection.back_project_cpu(granule_path, orbit_path, tmp_path) mock_call_stanford_module.assert_called_once_with( 'sentinel/sentinel_scene_cpu.py', [str(granule_path.with_suffix('')), str(orbit_path)], From 2725a0f63d6d131ca8468207e425083b8dab3820 Mon Sep 17 00:00:00 2001 From: Forrest Williams Date: Thu, 9 May 2024 15:43:26 -0500 Subject: [PATCH 15/23] fix unzip download option --- src/hyp3_back_projection/utils.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/hyp3_back_projection/utils.py b/src/hyp3_back_projection/utils.py index 74d6e0a..6f45c50 100644 --- a/src/hyp3_back_projection/utils.py +++ b/src/hyp3_back_projection/utils.py @@ -155,16 +155,19 @@ def download_raw_granule(granule_name: str, output_dir: Path, unzip: bool = Fals bbox = shape(result.geojson()['geometry']) zip_path = output_dir / f'{granule_name[:-4]}.zip' - out_path = output_dir / f'{granule_name[:-4]}.SAFE' + if unzip: + out_path = zip_path + else: + out_path = output_dir / f'{granule_name[:-4]}.SAFE' if not out_path.exists() and not zip_path.exists(): result.download(path=output_dir, session=session) - if not out_path.exists() and unzip: + if not out_path.exists(): with ZipFile(zip_path, 'r') as zip_ref: zip_ref.extractall('.') - if zip_path.exists(): + if zip_path.exists() and unzip: zip_path.unlink() return out_path, bbox From a6f6cf9b74ffbecc5dfafbabfe3e8715da88093f Mon Sep 17 00:00:00 2001 From: Forrest Williams Date: Thu, 9 May 2024 16:11:57 -0500 Subject: [PATCH 16/23] fix small bugs --- src/hyp3_back_projection/back_projection.py | 7 ++++--- src/hyp3_back_projection/utils.py | 18 +++++++++--------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git 
a/src/hyp3_back_projection/back_projection.py b/src/hyp3_back_projection/back_projection.py
index 4c9211f..48f19c0 100644
--- a/src/hyp3_back_projection/back_projection.py
+++ b/src/hyp3_back_projection/back_projection.py
@@ -49,7 +49,7 @@ def back_project_cpu(granule_orbit_pairs: Iterable, work_dir: Path) -> None:
         granule_orbit_pairs: List of tuples of granule and orbit file paths
         work_dir: Working directory for processing
     """
-    check_required_files(['elevation.dem', 'elevation.dem.rsc', 'params'])
+    check_required_files(['elevation.dem', 'elevation.dem.rsc', 'params'], work_dir)

     for granule_path, orbit_path in granule_orbit_pairs:
         args = [str(granule_path.with_suffix('')), str(orbit_path)]
@@ -73,7 +73,7 @@ def back_project_gpu(granule_orbit_pairs: Iterable, work_dir: Path) -> None:
         work_dir: Working directory for processing
     """
     n_gpus = utils.how_many_gpus()
-    create_zipped_safe_list([x[0] for x in granule_orbit_pairs], work_dir)
+    create_zipped_safe_list([x[0] for x in granule_orbit_pairs], work_dir=work_dir)

     check_required_files(['elevation.dem', 'elevation.dem.rsc', 'params', 'ziplist'])

@@ -145,7 +145,8 @@ def back_project(
     bboxs = []
     granule_orbit_pairs = []
     for granule in granules:
-        granule_path, granule_bbox = utils.download_raw_granule(granule, work_dir)
+        unzip = True if not gpu else False
+        granule_path, granule_bbox = utils.download_raw_granule(granule, work_dir, unzip)
         orbit_path = utils.download_orbit(granule, work_dir)
         bboxs.append(granule_bbox)
         granule_orbit_pairs.append((granule_path, orbit_path))
diff --git a/src/hyp3_back_projection/utils.py b/src/hyp3_back_projection/utils.py
index 6f45c50..85a5f7b 100644
--- a/src/hyp3_back_projection/utils.py
+++ b/src/hyp3_back_projection/utils.py
@@ -157,18 +157,18 @@ def download_raw_granule(granule_name: str, output_dir: Path, unzip: bool = Fals
     zip_path = output_dir / f'{granule_name[:-4]}.zip'
     if unzip:
         out_path = zip_path
+        result.download(path=output_dir, session=session)
     else:
         out_path = output_dir / f'{granule_name[:-4]}.SAFE'
+        if not out_path.exists() and not zip_path.exists():
+            result.download(path=output_dir, session=session)

-    if not out_path.exists() and not zip_path.exists():
-        result.download(path=output_dir, session=session)
-
-    if not out_path.exists():
-        with ZipFile(zip_path, 'r') as zip_ref:
-            zip_ref.extractall('.')
+        if not out_path.exists():
+            with ZipFile(zip_path, 'r') as zip_ref:
+                zip_ref.extractall('.')

-    if zip_path.exists() and unzip:
-        zip_path.unlink()
+        if zip_path.exists() and unzip:
+            zip_path.unlink()

     return out_path, bbox

@@ -209,7 +209,7 @@ def call_stanford_module(local_name, args: List = [], work_dir: Optional[Path] =
 def how_many_gpus():
     """Get the number of GPUs available on the system using Stanford script."""
     cmd = (get_proc_home() / 'sentinel' / 'howmanygpus').resolve()
-    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
+    proc = subprocess.Popen(str(cmd), stdout=subprocess.PIPE, shell=True)
     (param, err) = proc.communicate()
     ngpus = int(str(param, 'UTF-8').split()[0])
     return ngpus

From e1518caa041a0231bb56bb6630a119e4d658349e Mon Sep 17 00:00:00 2001
From: Forrest Williams
Date: Thu, 9 May 2024 16:26:56 -0500
Subject: [PATCH 17/23] switch back to using sentinel_scene_multigpu

---
 src/hyp3_back_projection/back_projection.py | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/src/hyp3_back_projection/back_projection.py b/src/hyp3_back_projection/back_projection.py
index 48f19c0..c2cb08a 100644
--- a/src/hyp3_back_projection/back_projection.py
+++ b/src/hyp3_back_projection/back_projection.py @@ -4,6 +4,7 @@ import argparse import logging +import os import zipfile from pathlib import Path from typing import Iterable, Optional @@ -42,7 +43,7 @@ def clean_up_after_back_projection(work_dir: Path) -> None: [f.unlink() for f in work_dir.glob(pattern)] -def back_project_cpu(granule_orbit_pairs: Iterable, work_dir: Path) -> None: +def back_project_granules(granule_orbit_pairs: Iterable, work_dir: Path, gpu: bool = False) -> None: """Back-project a set of Sentinel-1 level-0 granules using the CPU-based workflow. Args: @@ -51,9 +52,14 @@ def back_project_cpu(granule_orbit_pairs: Iterable, work_dir: Path) -> None: """ check_required_files(['elevation.dem', 'elevation.dem.rsc', 'params'], work_dir) + if gpu: + os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' + os.environ['CUDA_VISIBLE_DEVICES'] = 1 + + cmd = 'sentinel/sentinel_scene_multigpu.py' if gpu else 'sentinel/sentinel_scene_cpu.py' for granule_path, orbit_path in granule_orbit_pairs: args = [str(granule_path.with_suffix('')), str(orbit_path)] - utils.call_stanford_module('sentinel/sentinel_scene_cpu.py', args, work_dir=work_dir) + utils.call_stanford_module(cmd, args, work_dir=work_dir) clean_up_after_back_projection(work_dir) @@ -155,10 +161,7 @@ def back_project( dem_path = dem.download_dem_for_back_projection(full_bbox, work_dir) create_param_file(dem_path, dem_path.with_suffix('.dem.rsc'), work_dir) - if gpu: - back_project_gpu(granule_orbit_pairs, work_dir=work_dir) - else: - back_project_cpu(granule_orbit_pairs, work_dir=work_dir) + back_project_granules(granule_orbit_pairs, work_dir=work_dir, gpu=gpu) utils.call_stanford_module('util/merge_slcs.py', work_dir=work_dir) From 3602d7d1c562916924c866f97df0cb39c28f3206 Mon Sep 17 00:00:00 2001 From: Forrest Williams Date: Thu, 9 May 2024 16:36:30 -0500 Subject: [PATCH 18/23] fix new bugs --- src/hyp3_back_projection/back_projection.py | 5 ++--- src/hyp3_back_projection/utils.py | 3 ++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/hyp3_back_projection/back_projection.py b/src/hyp3_back_projection/back_projection.py index c2cb08a..7c15558 100644 --- a/src/hyp3_back_projection/back_projection.py +++ b/src/hyp3_back_projection/back_projection.py @@ -54,7 +54,7 @@ def back_project_granules(granule_orbit_pairs: Iterable, work_dir: Path, gpu: bo if gpu: os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' - os.environ['CUDA_VISIBLE_DEVICES'] = 1 + os.environ['CUDA_VISIBLE_DEVICES'] = '1' cmd = 'sentinel/sentinel_scene_multigpu.py' if gpu else 'sentinel/sentinel_scene_cpu.py' for granule_path, orbit_path in granule_orbit_pairs: @@ -151,8 +151,7 @@ def back_project( bboxs = [] granule_orbit_pairs = [] for granule in granules: - unzip = True if not gpu else False - granule_path, granule_bbox = utils.download_raw_granule(granule, work_dir, unzip) + granule_path, granule_bbox = utils.download_raw_granule(granule, work_dir, unzip=True) orbit_path = utils.download_orbit(granule, work_dir) bboxs.append(granule_bbox) granule_orbit_pairs.append((granule_path, orbit_path)) diff --git a/src/hyp3_back_projection/utils.py b/src/hyp3_back_projection/utils.py index 85a5f7b..dc72bb2 100644 --- a/src/hyp3_back_projection/utils.py +++ b/src/hyp3_back_projection/utils.py @@ -157,7 +157,8 @@ def download_raw_granule(granule_name: str, output_dir: Path, unzip: bool = Fals zip_path = output_dir / f'{granule_name[:-4]}.zip' if unzip: out_path = zip_path - result.download(path=output_dir, session=session) + if not out_path.exists(): + 
result.download(path=output_dir, session=session) else: out_path = output_dir / f'{granule_name[:-4]}.SAFE' if not out_path.exists() and not zip_path.exists(): From 661cc2a9be3cba6d1300fb3685a11045d67b8672 Mon Sep 17 00:00:00 2001 From: Forrest Williams Date: Mon, 13 May 2024 09:13:29 -0500 Subject: [PATCH 19/23] fix small bugs --- src/hyp3_back_projection/back_projection.py | 2 +- src/hyp3_back_projection/utils.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/hyp3_back_projection/back_projection.py b/src/hyp3_back_projection/back_projection.py index 7c15558..9d3585b 100644 --- a/src/hyp3_back_projection/back_projection.py +++ b/src/hyp3_back_projection/back_projection.py @@ -54,7 +54,7 @@ def back_project_granules(granule_orbit_pairs: Iterable, work_dir: Path, gpu: bo if gpu: os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' - os.environ['CUDA_VISIBLE_DEVICES'] = '1' + os.environ['CUDA_VISIBLE_DEVICES'] = '0' cmd = 'sentinel/sentinel_scene_multigpu.py' if gpu else 'sentinel/sentinel_scene_cpu.py' for granule_path, orbit_path in granule_orbit_pairs: diff --git a/src/hyp3_back_projection/utils.py b/src/hyp3_back_projection/utils.py index dc72bb2..5994109 100644 --- a/src/hyp3_back_projection/utils.py +++ b/src/hyp3_back_projection/utils.py @@ -142,6 +142,7 @@ def download_raw_granule(granule_name: str, output_dir: Path, unzip: bool = Fals Args: granule_name: Name of the granule to download output_dir: Directory to save the granule in + unzip: Unzip the granule if it is a zip file Returns: Tuple of the granule path and its extent as a Polygon @@ -155,7 +156,7 @@ def download_raw_granule(granule_name: str, output_dir: Path, unzip: bool = Fals bbox = shape(result.geojson()['geometry']) zip_path = output_dir / f'{granule_name[:-4]}.zip' - if unzip: + if not unzip: out_path = zip_path if not out_path.exists(): result.download(path=output_dir, session=session) From 131f30ce1dad6f12aac5191b9b0d9a5acdc5ad0c Mon Sep 17 00:00:00 2001 From: Forrest Williams Date: Mon, 13 May 2024 09:52:48 -0500 Subject: [PATCH 20/23] cleanup --- src/hyp3_back_projection/back_projection.py | 24 --------------------- tests/test_back_projection.py | 6 +++--- 2 files changed, 3 insertions(+), 27 deletions(-) diff --git a/src/hyp3_back_projection/back_projection.py b/src/hyp3_back_projection/back_projection.py index 9d3585b..d702eef 100644 --- a/src/hyp3_back_projection/back_projection.py +++ b/src/hyp3_back_projection/back_projection.py @@ -64,30 +64,6 @@ def back_project_granules(granule_orbit_pairs: Iterable, work_dir: Path, gpu: bo clean_up_after_back_projection(work_dir) -def create_zipped_safe_list(granule_paths: Iterable, work_dir: Path): - """Create a list of the zipped granules to process.""" - with open(work_dir / 'ziplist', 'w') as f: - for granule_path in granule_paths: - f.write(f'{granule_path.name}\n') - - -def back_project_gpu(granule_orbit_pairs: Iterable, work_dir: Path) -> None: - """Back-project a set of Sentinel-1 level-0 granules using the GPU-based workflow. 
From 131f30ce1dad6f12aac5191b9b0d9a5acdc5ad0c Mon Sep 17 00:00:00 2001
From: Forrest Williams
Date: Mon, 13 May 2024 09:52:48 -0500
Subject: [PATCH 20/23] cleanup

---
 src/hyp3_back_projection/back_projection.py | 24 ---------------------
 tests/test_back_projection.py               |  6 +++---
 2 files changed, 3 insertions(+), 27 deletions(-)

diff --git a/src/hyp3_back_projection/back_projection.py b/src/hyp3_back_projection/back_projection.py
index 9d3585b..d702eef 100644
--- a/src/hyp3_back_projection/back_projection.py
+++ b/src/hyp3_back_projection/back_projection.py
@@ -64,30 +64,6 @@ def back_project_granules(granule_orbit_pairs: Iterable, work_dir: Path, gpu: bo
     clean_up_after_back_projection(work_dir)
 
 
-def create_zipped_safe_list(granule_paths: Iterable, work_dir: Path):
-    """Create a list of the zipped granules to process."""
-    with open(work_dir / 'ziplist', 'w') as f:
-        for granule_path in granule_paths:
-            f.write(f'{granule_path.name}\n')
-
-
-def back_project_gpu(granule_orbit_pairs: Iterable, work_dir: Path) -> None:
-    """Back-project a set of Sentinel-1 level-0 granules using the GPU-based workflow.
-
-    Args:
-        granule_orbit_pairs: List of tuples of granule and orbit file paths
-        work_dir: Working directory for processing
-    """
-    n_gpus = utils.how_many_gpus()
-    create_zipped_safe_list([x[0] for x in granule_orbit_pairs], work_dir=work_dir)
-
-    check_required_files(['elevation.dem', 'elevation.dem.rsc', 'params', 'ziplist'])
-
-    utils.call_stanford_module('sentinel/process_parallel.py', ['ziplist', str(n_gpus)], work_dir=work_dir)
-
-    clean_up_after_back_projection(work_dir)
-
-
 def create_product(work_dir) -> Path:
     """Create a product zip file.
 
     Includes files needed for further processing (gslc, orbit, and parameter file).
diff --git a/tests/test_back_projection.py b/tests/test_back_projection.py
index 4d5d608..05ee50d 100644
--- a/tests/test_back_projection.py
+++ b/tests/test_back_projection.py
@@ -20,11 +20,11 @@ def test_create_param_file(tmp_path):
     assert lines[1] == str(dem_rsc_path)
 
 
-def test_back_project_single_granule(tmp_path, monkeypatch):
+def test_back_project_granules(tmp_path, monkeypatch):
     granule_path = tmp_path / 'granule.SAFE'
     orbit_path = tmp_path / 'orbit.xml'
     with pytest.raises(FileNotFoundError):
-        back_projection.back_project_cpu(granule_path, orbit_path, tmp_path)
+        back_projection.back_project_granules([(granule_path, orbit_path)], tmp_path)
 
     for f in ['elevation.dem', 'elevation.dem.rsc', 'params']:
         (tmp_path / f).touch()
@@ -35,7 +35,7 @@ def test_back_project_granules(tmp_path, monkeypatch):
     with monkeypatch.context() as m:
         mock_call_stanford_module = mock.Mock()
         m.setattr(utils, 'call_stanford_module', mock_call_stanford_module)
-        back_projection.back_project_cpu(granule_path, orbit_path, tmp_path)
+        back_projection.back_project_granules([(granule_path, orbit_path)], tmp_path)
         mock_call_stanford_module.assert_called_once_with(
            'sentinel/sentinel_scene_cpu.py',
            [str(granule_path.with_suffix('')), str(orbit_path)],
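With `back_project_gpu` and `create_zipped_safe_list` removed, CPU and GPU processing now share a single entry point and differ only in which Stanford script is invoked. The surviving dispatch reduces to a one-liner, extracted here from `back_project_granules` for clarity:

```python
# The CPU/GPU dispatch that survives the cleanup, extracted verbatim in spirit
# from back_project_granules: one function, one flag, two Stanford scripts.
def select_processor(gpu: bool = False) -> str:
    return 'sentinel/sentinel_scene_multigpu.py' if gpu else 'sentinel/sentinel_scene_cpu.py'


assert select_processor() == 'sentinel/sentinel_scene_cpu.py'
assert select_processor(gpu=True) == 'sentinel/sentinel_scene_multigpu.py'
```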
From b6c9a11aebdbe8be63356ef22b494ec6f364c187 Mon Sep 17 00:00:00 2001
From: Forrest Williams
Date: Mon, 13 May 2024 10:03:22 -0500
Subject: [PATCH 21/23] add ec2 instructions to readme

---
 README.md | 30 +++++++++++++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 874a4d1..dbbc171 100644
--- a/README.md
+++ b/README.md
@@ -55,4 +55,32 @@ before, check out this [guide](https://harmony.earthdata.nasa.gov/docs#getting-s
 ## GPU Setup:
 In order for Docker to be able to use the host's GPU, the host must have the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/index.html) installed and configured.
 The process is different for different OS's and Linux distros. The setup process for the most common distros, including Ubuntu,
-can be found [here](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#configuration). Make sure to follow the [Docker configuration steps](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#configuration) after installing the package. **This process is not necessary when running in AWS while using an EC2 image made with GPU support.**
+can be found [here](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#configuration). Make sure to follow the [Docker configuration steps](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#configuration) after installing the package.
+
+### EC2 Setup
+When running on an EC2 instance, the following setup is recommended:
+1. Create a [P3-family EC2 instance](https://aws.amazon.com/ec2/instance-types/p3/) with the [Amazon Linux 2 AMI with NVIDIA TESLA GPU Driver](https://aws.amazon.com/marketplace/pp/prodview-64e4rx3h733ru?sr=0-4&ref_=beagle&applicationId=AWSMPContessa)
+1. Install Docker and the nvidia-container-toolkit on the EC2
+```bash
+sudo yum-config-manager --disable amzn2-graphics
+curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
+sudo yum install docker -y
+sudo yum install nvidia-container-toolkit -y
+sudo yum-config-manager --enable amzn2-graphics
+```
+1. Optionally, set up Docker to not require `sudo` and to start when the instance starts
+```bash
+sudo systemctl start docker && \
+sudo usermod -a -G docker ec2-user && \
+sudo systemctl enable docker
+```
+1. Exit the instance and re-enter
+1. To test the GPU setup, run the base NVIDIA container:
+```bash
+docker run -it --gpus all nvidia/cuda:12.4.1-devel-ubuntu20.04 nvidia-smi
+```
+1. Build the actual container and run it:
+```bash
+docker build -t back-projection:gpu -f Dockerfile.gpu .
+docker run --gpus=all --rm -it back-projection:gpu ++process back_projection --help
+```
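The `nvidia-smi` container test added above is the quickest way to confirm the toolkit wiring. An equivalent check can also be scripted from Python inside the container — a sketch assuming only that `nvidia-smi` is on `PATH` when a GPU is correctly exposed (the helper name is illustrative, not part of the plugin):

```python
# Sketch of a GPU-visibility check, mirroring the `docker run ... nvidia-smi`
# test in the README instructions above. Assumes nvidia-smi is on PATH when
# the GPU is exposed; gpu_is_visible is an illustrative helper, not plugin API.
import shutil
import subprocess


def gpu_is_visible() -> bool:
    """Return True if nvidia-smi is present and lists at least one GPU."""
    if shutil.which('nvidia-smi') is None:
        return False
    result = subprocess.run(['nvidia-smi', '-L'], capture_output=True, text=True)
    return result.returncode == 0 and 'GPU' in result.stdout


if __name__ == '__main__':
    print('GPU visible:', gpu_is_visible())
```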
From c22fa6c70265d3bae80dd079dac977825e8b5555 Mon Sep 17 00:00:00 2001
From: Forrest Williams
Date: Mon, 13 May 2024 10:04:10 -0500
Subject: [PATCH 22/23] fix numbering

---
 README.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index dbbc171..83b61d9 100644
--- a/README.md
+++ b/README.md
@@ -60,7 +60,7 @@ can be found [here](https://docs.nvidia.com/datacenter/cloud-native/container-to
 ### EC2 Setup
 When running on an EC2 instance, the following setup is recommended:
 1. Create a [P3-family EC2 instance](https://aws.amazon.com/ec2/instance-types/p3/) with the [Amazon Linux 2 AMI with NVIDIA TESLA GPU Driver](https://aws.amazon.com/marketplace/pp/prodview-64e4rx3h733ru?sr=0-4&ref_=beagle&applicationId=AWSMPContessa)
-1. Install Docker and the nvidia-container-toolkit on the EC2
+2. Install Docker and the nvidia-container-toolkit on the EC2
 ```bash
 sudo yum-config-manager --disable amzn2-graphics
 curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
 sudo yum install docker -y
 sudo yum install nvidia-container-toolkit -y
 sudo yum-config-manager --enable amzn2-graphics
 ```
-1. Optionally, set up Docker to not require `sudo` and to start when the instance starts
+3. Optionally, set up Docker to not require `sudo` and to start when the instance starts
 ```bash
 sudo systemctl start docker && \
 sudo usermod -a -G docker ec2-user && \
 sudo systemctl enable docker
 ```
-1. Exit the instance and re-enter
-1. To test the GPU setup, run the base NVIDIA container:
+4. Exit the instance and re-enter
+5. To test the GPU setup, run the base NVIDIA container:
 ```bash
 docker run -it --gpus all nvidia/cuda:12.4.1-devel-ubuntu20.04 nvidia-smi
 ```
-1. Build the actual container and run it:
+6. Build the actual container and run it:
 ```bash
 docker build -t back-projection:gpu -f Dockerfile.gpu .
 docker run --gpus=all --rm -it back-projection:gpu ++process back_projection --help

From d75eca643468b70e2b6868c9cbea45c606f451c6 Mon Sep 17 00:00:00 2001
From: Forrest Williams
Date: Mon, 13 May 2024 10:05:24 -0500
Subject: [PATCH 23/23] add ec2 language

---
 README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 83b61d9..933c3f2 100644
--- a/README.md
+++ b/README.md
@@ -60,7 +60,7 @@ can be found [here](https://docs.nvidia.com/datacenter/cloud-native/container-to
 ### EC2 Setup
 When running on an EC2 instance, the following setup is recommended:
 1. Create a [P3-family EC2 instance](https://aws.amazon.com/ec2/instance-types/p3/) with the [Amazon Linux 2 AMI with NVIDIA TESLA GPU Driver](https://aws.amazon.com/marketplace/pp/prodview-64e4rx3h733ru?sr=0-4&ref_=beagle&applicationId=AWSMPContessa)
-2. Install Docker and the nvidia-container-toolkit on the EC2
+2. Install Docker and the nvidia-container-toolkit on the EC2 instance:
 ```bash
 sudo yum-config-manager --disable amzn2-graphics
 curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
 sudo yum install docker -y
 sudo yum install nvidia-container-toolkit -y
 sudo yum-config-manager --enable amzn2-graphics
 ```
-3. Optionally, set up Docker to not require `sudo` and to start when the instance starts
+3. Optionally, set up Docker to not require `sudo` and to start when the EC2 instance starts
 ```bash
 sudo systemctl start docker && \
 sudo usermod -a -G docker ec2-user && \
 sudo systemctl enable docker
 ```
-4. Exit the instance and re-enter
+4. Exit the EC2 instance and re-enter
 5. To test the GPU setup, run the base NVIDIA container:
 ```bash
 docker run -it --gpus all nvidia/cuda:12.4.1-devel-ubuntu20.04 nvidia-smi