#!/bin/bash
#SBATCH -o /home/cluster/group/userID/mydir/%x.%j.%N.out
#SBATCH -D /home/cluster/group/userID/mydir
#SBATCH -J jobname
#SBATCH --time=01:00:00
#SBATCH --ntasks=32
#SBATCH --nodes=1-1
#SBATCH --get-user-env
#SBATCH --cpus-per-task=28
#SBATCH --clusters=cm2_tiny     # older clusters: mpp1 (ice1 for ICE, uv1 for UV)
##### SuperMUC-NG: use partition instead of clusters:
##### #SBATCH --account=insert your_projectID_here
##### #SBATCH --partition=insert test, micro, general, large or fat
##### more details for SNG see: https://doku.lrz.de/display/PUBLIC/Job+Processing+with+SLURM+on+SuperMUC-NG
##### SNG end #####
#SBATCH --export=NONE
#SBATCH --mail-type=end
#SBATCH --mail-user=name@domain
module load slurm_setup
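# NOTE: the SLURM resource request above is assumed to provide the 32 cores
# that are distributed over the .machines file written further below (nproc=32)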
# load the wien2k module
module load wien2k
#change to working directory
cd $OPT_TMP/mydir
export SCRATCH=./
TMP_DIR=$SCRATCH/case
# supermuc
cd $TMP_DIR
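# (assumes the case directory $TMP_DIR already contains the initialized WIEN2k
#  input files, i.e. init_lapw has been run for this case)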
rm -fr .machines
# for 32 cpus and kpoints (in input file)
nproc=32
#write .machines file
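# The lines below give all 32 cores to an MPI-parallel lapw0 and split lapw1/lapw2
# into 11 k-parallel jobs on this node: 10 jobs with 3 cores + 1 job with 2 cores = 32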
echo '#' > .machines
# example for an MPI parallel lapw0
echo 'lapw0:'`hostname`':'$nproc >> .machines
# k-point and mpi parallel lapw1/2
echo '1:'`hostname`':3' >> .machines
echo '1:'`hostname`':3' >> .machines
echo '1:'`hostname`':3' >> .machines
echo '1:'`hostname`':3' >> .machines
echo '1:'`hostname`':3' >> .machines
echo '1:'`hostname`':3' >> .machines
echo '1:'`hostname`':3' >> .machines
echo '1:'`hostname`':3' >> .machines
echo '1:'`hostname`':3' >> .machines
echo '1:'`hostname`':3' >> .machines
echo '1:'`hostname`':2' >> .machines
echo 'granularity:1' >> .machines
echo 'extrafine:1' >> .machines
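# For reference, the generated .machines file should look roughly like this
# (the node name is whatever `hostname` returns; "nodename" is just a placeholder):
#
#   #
#   lapw0:nodename:32
#   1:nodename:3        (ten such lines, 3 cores each)
#   1:nodename:2
#   granularity:1
#   extrafine:1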
# run the parallel SCF cycle (-p: use .machines; -cc 0.0001: charge convergence;
# -i 50: max. iterations; -it: iterative diagonalization)
run_lapw -p -cc 0.0001 -i 50 -it