Ares supercomputer
Job with CPU
#!/bin/bash
#SBATCH --partition plgrid
#SBATCH --job-name cryosparc-master
#SBATCH --nodes 1
#SBATCH --ntasks-per-node 1
#SBATCH --mem 15GB
#SBATCH --time 72:00:00
#SBATCH -C localfs
#SBATCH -A plgrisa-cpu
#SBATCH --dependency=singleton
#SBATCH --output cryosparc-master-log-%J.txt
#SBATCH --signal=B:2@240
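# -C localfs             : request a node that has local scratch storage
# --dependency=singleton : at most one job with this name (per user) runs at a time
# --signal=B:2@240       : send SIGINT (signal 2) to the batch shell 240 s before the time limit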
echo "Job run" > test.txt
Prepare environment for simulations
Install packages
conda install -c conda-forge openmm=7.7 parmed mdtraj
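If the py310 environment that the job scripts activate does not exist yet, a minimal setup sketch:
conda create -n py310 python=3.10
conda activate py310
# install the packages as above, then verify the OpenMM installation:
python -m openmm.testInstallation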
Notes on cluster information (a quick version check follows the list):
* local machine: (py310) OpenMM 7.7 + cudatoolkit 11.6
* PLGrid: (py310) OpenMM 7.7 + cudatoolkit 10.2
* ACI: (base) OpenMM 7.7 + cudatoolkit 11.7
* Ares: OpenMM 7.7 + cudatoolkit 11.6 (GPU: Tesla V100)
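To confirm which versions a given environment actually provides, run inside the activated conda env:
conda list | grep -iE 'openmm|cudatoolkit'   # package versions
nvidia-smi                                   # driver and visible GPUs (GPU nodes only)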
Job with GPU
#!/bin/bash
#SBATCH --job-name ares_gpu
#SBATCH --nodes 1
#SBATCH --partition plgrid-gpu-v100
#SBATCH --gres=gpu:1
#SBATCH --ntasks-per-node 1
#SBATCH --mem 15GB
#SBATCH --time 48:00:00
#SBATCH -C localfs
#SBATCH -A plgrisa-gpu
#SBATCH --dependency=singleton
#SBATCH --output ares_gpu-log-%J.txt
#SBATCH --signal=B:2@240
## on prometheus:
#source /net/people/plgqvuvan/anaconda3/etc/profile.d/conda.sh
## on ares:
cd $SLURM_SUBMIT_DIR
source /net/people/plgrid/plgqvuvan/plggligroup/qvv5013/anaconda3/etc/profile.d/conda.sh
conda activate py310
module add cuda/11.6.0
echo "NVIDIA driver / GPU info:"
nvidia-smi
python single_run_extend.py -f control.cntrl
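Note: --signal=B:2@240 signals only the batch shell, and bash defers trap handling while a foreground command runs, so the Python process itself never sees the early warning. If the run should checkpoint before the time limit, a common pattern (a sketch, assuming single_run_extend.py traps SIGINT and checkpoints) is to launch the simulation in the background and forward the signal:
trap 'kill -INT "$SIM_PID"' INT    # forward the early-warning SIGINT to the simulation
python single_run_extend.py -f control.cntrl &
SIM_PID=$!
wait "$SIM_PID"                    # returns early when the trap fires
wait "$SIM_PID" 2>/dev/null        # wait again so checkpointing can finish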
Interactive mode on Ares (environment setup once inside is shown below):
- using GPU (add --gres=gpu:1, otherwise no GPU is allocated on this partition):
srun -p plgrid-gpu-v100 --gres=gpu:1 -A plgrisa-gpu --nodes=1 --ntasks=1 --mem=5GB --time=0-1 --pty bash
- using CPU:
srun -p plgrid -A plgrisa-cpu --nodes=1 --ntasks=1 --mem=5GB --time=0-1 --pty bash
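Once the interactive shell starts, set up the environment the same way as in the batch script:
source /net/people/plgrid/plgqvuvan/plggligroup/qvv5013/anaconda3/etc/profile.d/conda.sh
conda activate py310
module add cuda/11.6.0
nvidia-smi   # GPU session only: confirm the device is visible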