Commit f2af294

First update for SC25
1 parent 96c9354

File tree

48 files changed (+644 / -672 lines)

.archive.mk

Lines changed: 7 additions & 7 deletions

@@ -6,16 +6,16 @@
 # Changelog:
 # * Nov 2022: The archive is extracted again, then slides.pdf is removed if a patched slides-sc22.pdf is found (which includes an SC22 slide 0 title slide); and then repackaged
 .PHONY: all
-all: tut105-multi-gpu.tar.gz
+all: tut113-multi-gpu.tar.gz
 
-SOURCES=$(shell gfind . -maxdepth 1 -mindepth 1 -not -path "./.*" -not -name "tut105-multi-gpu.tar.gz" -printf '%P\n' | sort -h)
+SOURCES=$(shell gfind . -maxdepth 1 -mindepth 1 -not -path "./.*" -not -name "tut113-multi-gpu.tar.gz" -printf '%P\n' | sort -h)
 
-tut105-multi-gpu.tar.gz: $(shell find . -not -name "tut105-multi-gpu.tar.gz")
+tut113-multi-gpu.tar.gz: $(shell find . -not -name "tut113-multi-gpu.tar.gz")
 	sed -i '1 i***Please check GitHub repo for latest version of slides: https://github.com/FZJ-JSC/tutorial-multi-gpu/ ***\n' README.md
-	tar czf $@ --transform 's,^,ISC25-tut105-Multi-GPU/,' --exclude=".*" $(SOURCES)
+	tar czf $@ --transform 's,^,SC25-tut113-Multi-GPU/,' --exclude=".*" $(SOURCES)
 	tar xf $@
 	rm $@
-	find ISC25-tut105-Multi-GPU/ -not -path './.*' -iname 'slides-*.pdf' -execdir rm slides.pdf \;
-	tar czf $@ ISC25-tut105-Multi-GPU
-	rm -rf ISC25-tut105-Multi-GPU
+	find SC25-tut113-Multi-GPU/ -not -path './.*' -iname 'slides-*.pdf' -execdir rm slides.pdf \;
+	tar czf $@ SC25-tut113-Multi-GPU
+	rm -rf SC25-tut113-Multi-GPU
 	sed -i '1,2d' README.md

.etc/.set-facl-permissions.sh

Lines changed: 1 addition & 1 deletion

@@ -2,7 +2,7 @@
 
 set -x
 
-for user in haghighimood1 kraus1 hrywniak1 oden1 garciadegonzalo1 badwaik1 john2; do
+for user in haghighimood1 kraus1 hrywniak1 oden1 garciadegonzalo1 badwaik1 john2 appelhans1; do
 setfacl -m u:$user:rwx -R $PROJECT_training2446/common/
 setfacl -m u:$user:rwx -R $PROJECT_training2446/env.sh
 done

.etc/deploy-material.sh

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-rsync --archive --exclude="*minified.pdf" --exclude="tut*" --exclude=".*" --exclude="*-sc*.pdf" --verbose ../ judac:/p/project1/training2526/common/material/
+rsync --archive --exclude="*minified.pdf" --exclude="tut*" --exclude=".*" --exclude="*-sc*.pdf" --verbose ../ judac:/p/project1/training2555/common/material/

.etc/deploy.sh

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-rsync --archive --exclude="deploy.sh" --exclude="raw/" --exclude="sc2*-titleslides/" --verbose . judac:/p/project1/training2526/common/environment/
+rsync --archive --exclude="deploy.sh" --exclude="raw/" --exclude="sc2*-titleslides/" --verbose . judac:/p/project1/training2555/common/environment/

.etc/instructions-header.md

Lines changed: 4 additions & 4 deletions

@@ -1,6 +1,6 @@
-# SC24 Tutorial: Efficient Distributed GPU Programming for Exascale
+# SC25 Tutorial: Efficient Distributed GPU Programming for Exascale
 
-- Sunday, November 17, 2024 8:30 AM to 5:30 PM
-- Location: B211, Atlanta Convention Center, Georgia, USA
+- Sunday, November 16, 2025 8:30 AM to 5:00 PM
+- Location: Room 127, St. Louis Convention Center, St. Louis, USA
 - Program Link:
-  https://sc24.conference-program.com/presentation/?id=tut123&sess=sess412
+  https://sc25.conference-program.com/presentation/?id=tut113&sess=sess252

.etc/jsccourse-bashrc.sh

Lines changed: 4 additions & 4 deletions

@@ -11,10 +11,10 @@
 # Andreas Herten, >2017
 ################################################
 if [ -z "$_JSCCOURSE_ENV_SOURCED" ]; then
-project="training2526"
+project="training2555"
 
 export JSCCOURSE_DIR_GROUP=/p/project1/$project
-export JSCCOURSE_DIR_LOCAL=${JSCCOURSE_DIR_LOCAL_BASE:-$HOME}/ISC25-Multi-GPU-Tutorial
+export JSCCOURSE_DIR_LOCAL=${JSCCOURSE_DIR_LOCAL_BASE:-$HOME}/SC25-Multi-GPU-Tutorial
 
 export _JSCCOURSE_ENV_SOURCED="$(date)"
 export C_V_D="0,1,2,3"
@@ -24,7 +24,7 @@ if [ -z "$_JSCCOURSE_ENV_SOURCED" ]; then
 res=""
 currentday=$(date +%d)
 if [[ "$currentday" == "13" ]]; then
-res="--reservation isc25-mgpu"
+res="--reservation sc25-mgpu"
 fi
 
 export SLURM_NTASKS=1
@@ -120,7 +120,7 @@ if [[ $- =~ "i" ]]; then
 
 echo ""
 echo "*******************************************************************************"
-echo " Welcome to the ISC25 Tutorial on Multi-GPU Computing for Exascale! "
+echo " Welcome to the SC25 Tutorial on Multi-GPU Computing for Exascale! "
 # echo " A default call to get a batch system allocation is stored in \$JSC_ALLOC_CMD!"
 # echo " Use it with \`eval \$JSC_ALLOC_CMD\`. The value of \$JSC_ALLOC_CMD is:"
 # echo -n " "

.etc/modules.sh

Lines changed: 2 additions & 2 deletions

@@ -1,10 +1,10 @@
 module purge
 module load GCC/13.3.0
-module load CUDA/12
+module load CUDA/12 #12.6.0
 module load OpenMPI/5.0.5
 export MPI_HOME=$EBROOTOPENMPI
 #export MPI_HOME=$EBROOTPSMPI
-module load NCCL/default-CUDA-12
+module load NCCL/default-CUDA-12 #2.22.3-1
 module load NVSHMEM/3.1.7-CUDA-12
 module load Nsight-Systems/2025.3.1
 module load MPI-settings/CUDA

.zenodo.json

Lines changed: 14 additions & 9 deletions

@@ -24,26 +24,31 @@
 "orcid": "0000-0002-9670-5296",
 "affiliation": "FernUni Hagen",
 "name": "Oden, Lena"
+},
+{
+"orcid": "0000-0003-1433-9198",
+"affiliation": "NVIDIA",
+"name": "Appelhans, David"
 }
 ],
 
 "title": "Efficient Distributed GPU Programming for Exascale",
 
-"publication_date": "2025-06-13",
+"publication_date": "2025-11-16",
 
-"description": "<p>Over the past decade, GPUs became ubiquitous in HPC installations around the world, delivering the majority of performance of some of the largest supercomputers (e.g. Summit, Sierra, JUWELS Booster). This trend continues in the recently deployed and upcoming Pre-Exascale and Exascale systems (JUPITER, LUMI, Leonardo; El Capitan, Frontier, Aurora): GPUs are chosen as the core computing devices to enter this next era of HPC.To take advantage of future GPU-accelerated systems with tens of thousands of devices, application developers need to have the proper skills and tools to understand, manage, and optimize distributed GPU applications.In this tutorial, participants will learn techniques to efficiently program large-scale multi-GPU systems. While programming multiple GPUs with MPI is explained in detail, also advanced tuning techniques and complementing programming models like NCCL and NVSHMEM are presented. Tools for analysis are shown and used to motivate and implement performance optimizations. The tutorial teaches fundamental concepts that apply to GPU-accelerated systems in general, taking the NVIDIA platform as an example. It is a combination of lectures and hands-on exercises, using a development system for JUPITER (JEDI), for interactive learning and discovery.</p>",
+"description": "<p>Over the past decade, GPUs became ubiquitous in HPC installations around the world, delivering the majority of performance of some of the largest supercomputers, steadily increasing the available compute capacity. Finally, four exascale systems are deployed (Frontier, Aurora, El Capitan, JUPITER), using GPUs as the core computing devices for this era of HPC. To take advantage of these GPU-accelerated systems with tens of thousands of devices, application developers need to have the proper skills and tools to understand, manage, and optimize distributed GPU applications. In this tutorial, participants will learn techniques to efficiently program large-scale multi-GPU systems. While programming multiple GPUs with MPI is explained in detail, also advanced tuning techniques and complementing programming models like NCCL and NVSHMEM are presented. Tools for analysis are shown and used to motivate and implement performance optimizations. The tutorial teaches fundamental concepts that apply to GPU-accelerated systems of any vendor in general, taking the NVIDIA platform as an example. It is a combination of lectures and hands-on exercises, using the JUPITER system for interactive learning and discovery.</p>",
 
-"notes": "Slides and exercises of tutorial presented at ISC High Performance 2025; https://isc.app.swapcard.com/widget/event/isc-high-performance-2025/planning/UGxhbm5pbmdfMjU4MTc5Ng==",
+"notes": "Slides and exercises of tutorial presented at SC25 (The International Conference for High Performance Computing, Networking, Storage, and Analysis 2025); https://sc25.conference-program.com/presentation/?id=tut113&sess=sess252",
 
 "access_right": "open",
 
-"conference_title": "ISC 2025",
-"conference_acronym": "ISC25",
-"conference_dates": "10 June-13 June 2025",
-"conference_place": "Hamburg, Germany",
-"conference_url": "https://www.isc-hpc.com/",
+"conference_title": "SC 2025",
+"conference_acronym": "SC25",
+"conference_dates": "16 November-21 November 2025",
+"conference_place": "St. Louis, Missouri, USA",
+"conference_url": "https://sc25.supercomputing.org/",
 "conference_session": "Tutorials",
-"conference_session_part": "Afternoon",
+"conference_session_part": "Day 1",
 
 "upload_type": "lesson"
 }

03-H_Multi_GPU_Parallelization/.master/Instructions.ipynb

Lines changed: 33 additions & 34 deletions

@@ -4,14 +4,13 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# SC24 Tutorial: Efficient Distributed GPU Programming for Exascale\n",
+"# SC25 Tutorial: Efficient Distributed GPU Programming for Exascale\n",
 "\n",
-"- Sunday, November 17, 2024 8:30 AM to 5:30 PM\n",
-"- Location: B211, Atlanta Convention Center, Georgia, USA\n",
-"- Program Link:\n",
-"  https://sc24.conference-program.com/presentation/?id=tut123&sess=sess412\n",
-"\n",
-"## Hands-On 3: Multi-GPU Parallelization with CUDA-aware MPI\n",
+"- Sunday, November 16, 2025 8:30 AM to 5:00 PM\n",
+"- Location: Room 127, St. Louis Convention Center, St. Louis, USA\n",
+"- Program Link:\n",
+"  https://sc25.conference-program.com/presentation/?id=tut113&sess=sess252\n",
+"  \\## Hands-On 3: Multi-GPU Parallelization with CUDA-aware MPI\n",
 "\n",
 "### Task: Parallelize Jacobi Solver for Multiple GPUs using CUDA-aware MPI\n",
 "\n",
@@ -27,23 +26,23 @@
 "and `POP` macros). Once you are familiar with the code, please work on\n",
 "the `TODOs` in `jacobi.cu`:\n",
 "\n",
-"- Get the available GPU devices and use it and the local rank to set\n",
-"  the active GPU for each process\n",
-"- Compute the top and bottom neigbhors. We are using\n",
-"  reflecting/periodic boundaries on top and bottom, so rank0’s Top\n",
-"  neighbor is (size-1) and rank(size-1) bottom neighbor is rank 0\n",
-"- Use MPI_Sendrecv to exchange data between the neighbors\n",
-"  - use CUDA-aware MPI, so the send - and the receive buffers are\n",
-"    located in GPU-memory\n",
-"  - The first newly calculated row (‘iy_start’) is sent to the top\n",
-"    neigbor and the bottom boundary row (`iy_end`) is received from\n",
-"    the bottom process.\n",
-"  - The last calculated row (`iy_end-1`) is send to the bottom\n",
-"    process and the top boundary (`0`) is received from the top\n",
-"  - Don’t forget to synchronize the computation on the GPU before\n",
-"    starting the data transfer\n",
-"  - use the self-defined MPI_REAL_TYPE. This allows an easy switch\n",
-"    between single- and double precision\n",
+"- Get the available GPU devices and use it and the local rank to set the\n",
+"  active GPU for each process\n",
+"- Compute the top and bottom neigbhors. We are using reflecting/periodic\n",
+"  boundaries on top and bottom, so rank0’s Top neighbor is (size-1) and\n",
+"  rank(size-1) bottom neighbor is rank 0\n",
+"- Use MPI_Sendrecv to exchange data between the neighbors\n",
+"  - use CUDA-aware MPI, so the send - and the receive buffers are\n",
+"    located in GPU-memory\n",
+"  - The first newly calculated row (‘iy_start’) is sent to the top\n",
+"    neigbor and the bottom boundary row (`iy_end`) is received from the\n",
+"    bottom process.\n",
+"  - The last calculated row (`iy_end-1`) is send to the bottom process\n",
+"    and the top boundary (`0`) is received from the top\n",
+"  - Don’t forget to synchronize the computation on the GPU before\n",
+"    starting the data transfer\n",
+"  - use the self-defined MPI_REAL_TYPE. This allows an easy switch\n",
+"    between single- and double precision\n",
 "\n",
 "Compile with\n",
 "\n",
@@ -61,17 +60,17 @@
 "\n",
 "### Description\n",
 "\n",
-"- The work distribution of the first task is not ideal, because it can\n",
-"  lead to the process with the last rank having to calculate\n",
-"  significantly more than all the others. Therefore, the load\n",
-"  distribution is to be optimized in this task.\n",
-"- Compute the `chunk_size` that each rank gets either (ny - 2) / size\n",
-"  or (ny - 2) / size + 1 rows.\n",
-"- Compute how many processes get (ny - 2) / size resp (ny - 2) /\n",
-"  size + 1 rows\n",
-"- Adapt the computation of (`iy_start_global`)"
+"- The work distribution of the first task is not ideal, because it can\n",
+"  lead to the process with the last rank having to calculate\n",
+"  significantly more than all the others. Therefore, the load\n",
+"  distribution is to be optimized in this task.\n",
+"- Compute the `chunk_size` that each rank gets either (ny - 2) / size or\n",
+"  (ny - 2) / size + 1 rows.\n",
+"- Compute how many processes get (ny - 2) / size resp (ny - 2) / size +\n",
+"  1 rows\n",
+"- Adapt the computation of (`iy_start_global`)"
 ],
-"id": "e42b5ab3-f626-4da5-b0c9-52a444cefde8"
+"id": "8b73eab2-1e9f-42a8-b366-29ff21d469ea"
 }
 ],
 "nbformat": 4,

03-H_Multi_GPU_Parallelization/.master/Instructions.md

Lines changed: 4 additions & 5 deletions

@@ -1,10 +1,9 @@
-# SC24 Tutorial: Efficient Distributed GPU Programming for Exascale
+# SC25 Tutorial: Efficient Distributed GPU Programming for Exascale
 
-- Sunday, November 17, 2024 8:30 AM to 5:30 PM
-- Location: B211, Atlanta Convention Center, Georgia, USA
+- Sunday, November 16, 2025 8:30 AM to 5:00 PM
+- Location: Room 127, St. Louis Convention Center, St. Louis, USA
 - Program Link:
-  https://sc24.conference-program.com/presentation/?id=tut123&sess=sess412
-
+  https://sc25.conference-program.com/presentation/?id=tut113&sess=sess252
 ## Hands-On 3: Multi-GPU Parallelization with CUDA-aware MPI
 
 ### Task: Parallelize Jacobi Solver for Multiple GPUs using CUDA-aware MPI