...

Code Block: GPU Job
#!/bin/tcsh
#SBATCH --partition=im1080-gpu
# Multiple options can be combined on one #SBATCH line (see the example below the directives)
#SBATCH --time=1:00:00
#SBATCH --nodes=1
# 1 CPU can be paired with only 1 GPU
# GPU jobs can request all 24 CPUs
#SBATCH --ntasks-per-node=1
# Request one GPU for your workload
#SBATCH --gres=gpu:1
# Need both GPUs, use --gres=gpu:2
#SBATCH --job-name myjob
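# For example, the time and node requests above could also be written on a
# single line: "#SBATCH --time=1:00:00 --nodes=1"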

# Source the zlmod.csh script to put LMOD in your path
source /share/Apps/compilers/etc/lmod/zlmod.csh

# Load the LAMMPS module
module load lammps

# Most modules set LOCAL_SCRATCH to /scratch/${SLURM_JOB_USER}/${SLURM_JOB_ID}
# and CEPHFS_SCRATCH to /share/ceph/scratch/${USER}/${SLURM_JOB_ID}
cd ${CEPHFS_SCRATCH}
# Copy input and miscellaneous files to the run directory
cp ${SLURM_SUBMIT_DIR}/* .

# Run LAMMPS on the input file in.lj
srun `which lammps` -in in.lj -sf gpu -pk gpu 1 gpuID ${CUDA_VISIBLE_DEVICES} ${CUDA_VISIBLE_DEVICES}

# Copy output back to ${SLURM_SUBMIT_DIR} in a subfolder
cd ${SLURM_SUBMIT_DIR}/
mv /share/ceph/scratch/${USER}/${SLURM_JOB_ID} .

# Note that there is no guarantee which device will be assigned to your job.
# If you hard-code 0 or 1 instead of using ${CUDA_VISIBLE_DEVICES}, your job
# may end up using GPUs assigned to another user.
# NAMD: add "+devices ${CUDA_VISIBLE_DEVICES}" as a command-line flag to charmrun
# GROMACS: add "-gpu_id ${CUDA_VISIBLE_DEVICES}" as a command-line flag to mdrun
# If you request both GPUs, then
# LAMMPS: -pk gpu 2 gpuID 0 1
# NAMD: +devices 0,1
# GROMACS: -gpu_id 01
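# For example, with "#SBATCH --gres=gpu:2" the LAMMPS launch line above could
# become (a sketch reusing the same in.lj input and lammps binary):
#   srun `which lammps` -in in.lj -sf gpu -pk gpu 2 gpuID 0 1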

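Once saved (for example as gpu_job.csh; the file name here is only a placeholder), the script is submitted and monitored with the standard SLURM commands:

  sbatch gpu_job.csh
  squeue -u ${USER}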
...