Sep 18, 2018
$ cat myJob.sub
#!/usr/bin/bash --login
#PBS -N myJob
#PBS -l walltime=4:00:00
#PBS -l nodes=1:ppn=4
#PBS -l mem=20gb
#PBS -l feature=intel16
#PBS -o /mnt/research/quantgen/logs
#PBS -j oe
#PBS -m abe

cd $PBS_O_WORKDIR
Rscript myJob.R

$ qsub myJob.sub
61177022.mgr-04.i
$ cat myJob.sub
#!/usr/bin/bash --login
#SBATCH --job-name=myJob
#SBATCH --time=4:00:00
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --mem=20gb
#SBATCH --constraint=intel18
#SBATCH --output=/mnt/research/quantgen/logs/%j
#SBATCH --mail-type=FAIL,BEGIN,END

cd $SLURM_SUBMIT_DIR
Rscript myJob.R

$ sbatch myJob.sub
Submitted batch job 134483
$ qstat -u $USER
mgr-04.i:
                                                                  Req'd     Req'd       Elap
Job ID                  Username Queue    Jobname          SessID  NDS   TSK Memory    Time    S   Time
----------------------- -------- -------- ---------------- ------ ----- --- ------ --------- - ---------
61197580.mgr-04.i       gruenebe main     STDIN            112526     1   1    1gb  01:00:00 R  00:01:41
$ squeue -u $USER
 JOBID PARTITION NAME     USER     ST TIME NODES NODELIST(REASON)
136161 general-l bash     gruenebe R  0:20     1 lac-339
$ qdel 61197580.mgr-04.i
$ scancel 136161
#PBS -t 1-5
$PBS_ARRAYID
#SBATCH --array=1-5
and, for per-task log files, #SBATCH --output=/mnt/research/quantgen/logs/%A_%a (where %A is the array job ID and %a is the array task index)
$SLURM_ARRAY_TASK_ID
$ qsub -I -l nodes=1:ppn=4 -l mem=20gb -l walltime=1:00:00
$ srun --nodes=1 --ntasks=1 --cpus-per-task=4 --mem=20gb --time=1:00:00 --pty /bin/bash
Work in progress:
The shared configuration files have been adapted to the new system and automatically load the following software:
More available software: gcta/1.91.5b, htop/2.2.0, ncdu/1.13, parallel/20180722, plink/2.00a1, tmux/2.7, tree/1.7.0