Ares supercomputer
CPU job script
#!/bin/bash
#SBATCH --partition plgrid
#SBATCH --job-name cryosparc-master
#SBATCH --nodes 1
#SBATCH --ntasks-per-node 1
#SBATCH --mem 15GB
#SBATCH --time 72:00:00
#SBATCH -C localfs
#SBATCH -A plgrisa-cpu
#SBATCH --dependency=singleton
#SBATCH --output cryosparc-master-log-%J.txt
#SBATCH --signal=B:2@240
echo "Job run" > test.txt
GPU job script
#!/bin/bash
#SBATCH --job-name ispe_0
#SBATCH --nodes 1
#SBATCH --partition plgrid-gpu-v100
#SBATCH --gres=gpu:1
#SBATCH --ntasks-per-node 1
#SBATCH --mem 15GB
#SBATCH --time 48:00:00
#SBATCH -C localfs
#SBATCH -A plgrisa-gpu
#SBATCH --dependency=singleton
#SBATCH --output=output.out
#SBATCH --error=error.err
#SBATCH --signal=B:2@240
## ares
# Initialize conda for this non-interactive shell and activate the py310 environment
# (sourcing conda.sh directly makes a separate `conda init bash` unnecessary)
source /net/people/plgrid/plgqvuvan/plggligroup/qvv5013/anaconda3/etc/profile.d/conda.sh
conda activate py310
# Run from the directory the job was submitted from
cd "$SLURM_SUBMIT_DIR"
echo "Working directory: $(pwd)"
python single_run.py -f control.cntrl
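Both Ares scripts set --signal=B:2@240, which tells SLURM to send signal 2 (SIGINT) to the batch shell roughly 240 seconds before the walltime limit. A minimal sketch of catching that signal for a clean shutdown (the cleanup action and log filename are assumptions; the payload must run in the background so the shell can process the trap while it runs):
# Trap the SIGINT that SLURM delivers ~240 s before the time limit (--signal=B:2@240)
cleanup() {
    echo "Approaching walltime, shutting down cleanly" >> shutdown.log  # placeholder cleanup action
    exit 0
}
trap cleanup INT
# Launch the payload in the background and wait on it; `wait` is interruptible, so the trap can fire
python single_run.py -f control.cntrl &
wait $!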
Interactive mode on Ares
srun -p plgrid -N 1 --ntasks-per-node=8 -n 8 --time=0-8 -A plgrisa-cpu --pty /bin/bash -l
This requests 1 node (-N 1) with 8 tasks (-n 8, --ntasks-per-node=8) and an 8-hour walltime (--time=0-8) on the plgrid partition under the plgrisa-cpu account.
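A GPU interactive session can presumably be requested the same way by pointing srun at the GPU partition and account from the batch script above (untested sketch; only the partition, account, and GPU request are taken from that script, the time and task counts are arbitrary):
srun -p plgrid-gpu-v100 -N 1 --ntasks-per-node=1 --gres=gpu:1 --time=0-8 -A plgrisa-gpu --pty /bin/bash -l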
Prometheus supercomputer
CPU job
GPU job script
#!/bin/bash -l
#SBATCH -J pGcat_SETINDEX
#SBATCH -N 1
#SBATCH --ntasks-per-node=1
#SBATCH --mem-per-cpu=5GB
#SBATCH --time=72:00:00
#SBATCH -A plgribo3gpu
#SBATCH --gres=gpu
#SBATCH -p plgrid-gpu
#SBATCH --output=output.out
#SBATCH --error=error.err
# Run from the submission directory and record which node the job landed on
cd "$SLURM_SUBMIT_DIR"
srun /bin/hostname
# SETINDEX is presumably a template token replaced with the trajectory index before submission
perl calc_entanglement_number.pl -i ../../setup/3cla_m_clean_ca.cor -t ../../SETINDEX/cat3_SETINDEX_prod.dcd -o ./
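Since SETINDEX appears in both the job name and the trajectory path, one way to produce and submit the per-index scripts is a small sed loop over a template copy of the script above (a sketch under that assumption; the template filename and the index range 1-10 are made up):
# Generate one job script per trajectory index from the SETINDEX template and submit it
for i in $(seq 1 10); do
    sed "s/SETINDEX/${i}/g" gpu_job.template > gpu_job_${i}.sh
    sbatch gpu_job_${i}.sh
done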