For more detailed information, see the Slurm sbatch documentation.
In a job script, these options are given as #SBATCH directives:
--job-name=<name> -J <name>
--account=<account> -A <account>
--partition=<queue>
--time=<hh:mm:ss> -t <hh:mm:ss>
--nodes=<count> -N <count>
--ntasks=<count> -n <count>
--ntasks-per-node=<count>
--cpus-per-task=<cores>
--mem=<limit>
--mem-per-cpu=<memory>
--gres=gpu:<count>
--nodelist=<node1>[,node2[,...]] -w <node1>[,node2[,...]]
--nodefile=<node file> -F <node file>
--array=<array indices> -a <array indices>
--output=<file path>
--error=<file path>
--output=<combined out and err file path>
--constraint=<architecture> -C <architecture>
--export=ALL
--export=NONE
--export=<variable[=value][,variable2=value2[,...]]>
--dependency=after:jobID[:jobID...]
--dependency=afterok:jobID[:jobID...]
--dependency=afternotok:jobID[:jobID...]
--dependency=afterany:jobID[:jobID...]
--mail-type=<events>
--mail-type=BEGIN,END,FAIL
--mail-user=<email address>
--begin=<date/time>
--exclusive
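
The directives above combine into a job script along the lines of the minimal sketch below; the account, partition, e-mail address, and program name are placeholders, so substitute values that exist on your cluster.

    #!/bin/bash
    #SBATCH --job-name=example           # job name shown by squeue
    #SBATCH --account=my_account         # placeholder: your allocation/account
    #SBATCH --partition=compute          # placeholder: a queue on your cluster
    #SBATCH --time=01:00:00              # walltime limit (hh:mm:ss)
    #SBATCH --nodes=1                    # number of nodes
    #SBATCH --ntasks-per-node=4          # tasks per node
    #SBATCH --cpus-per-task=1            # cores per task
    #SBATCH --mem=8G                     # memory per node
    #SBATCH --output=example.%j.out      # stdout (%j expands to the job ID)
    #SBATCH --error=example.%j.err       # stderr
    #SBATCH --mail-type=BEGIN,END,FAIL   # e-mail notifications
    #SBATCH --mail-user=me@example.edu   # placeholder address

    srun ./my_program                    # placeholder executable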
sbatch <job script>
scancel <job ID>
squeue
squeue -j <job ID>
squeue -u <netID>
scontrol show job -dd <job ID>
checkjob <job ID>
squeue -j <job ID> --start
scontrol show partition [queue]
scontrol hold <job ID>
scontrol release <job ID>
salloc <args>
srun --pty <args>
srun --pty --x11 <args>
sacct -j <job ID> --format JobID,JobName,NTasks,NodeList,CPUTime,ReqMem,Elapsed
scontrol write batch_script <job ID> [filename]
sbatch --version
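
A typical submit-and-monitor workflow built from the commands above might look like the following sketch; job.sh and post.sh are placeholder scripts, and --parsable makes sbatch print only the job ID so it can be captured in a shell variable.

    sbatch job.sh                                # submit; prints "Submitted batch job <job ID>"
    jobid=$(sbatch --parsable job.sh)            # capture the job ID of a second submission
    sbatch --dependency=afterok:$jobid post.sh   # run post.sh only if that job completes successfully
    squeue -u $USER                              # list your queued and running jobs
    scontrol show job -dd $jobid                 # detailed information about one job
    scancel $jobid                               # cancel the job if needed
    sacct -j $jobid --format JobID,JobName,Elapsed,ReqMem,NodeList   # accounting after completion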
$SLURM_JOB_NAME
$SLURM_JOB_ID
$SLURM_SUBMIT_DIR
$SLURM_JOB_NODELIST
scontrol show hostnames $SLURM_JOB_NODELIST
$SLURM_ARRAY_TASK_ID
$SLURM_JOB_PARTITION
$SLURM_JOB_NUM_NODES $SLURM_NNODES
$SLURM_NTASKS
$SLURM_TASKS_PER_NODE
$SLURM_NTASKS_PER_NODE
$SLURM_CPUS_PER_TASK
$SLURM_PRIO_PROCESS
$SLURM_JOB_USER
$SLURM_SUBMIT_HOST (equal to $HOSTNAME on the node where the job was submitted)
$HOSTNAME
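
A short sketch of a job script that reads these variables at run time; the resource requests are arbitrary example values.

    #!/bin/bash
    #SBATCH --job-name=env-demo
    #SBATCH --nodes=2
    #SBATCH --ntasks-per-node=4
    #SBATCH --time=00:05:00

    echo "Job $SLURM_JOB_NAME ($SLURM_JOB_ID) in partition $SLURM_JOB_PARTITION"
    echo "Submitted from $SLURM_SUBMIT_DIR on $SLURM_SUBMIT_HOST"
    echo "Running on $SLURM_JOB_NUM_NODES node(s): $SLURM_JOB_NODELIST"
    echo "Total tasks: $SLURM_NTASKS (cpus per task: ${SLURM_CPUS_PER_TASK:-1})"

    # Expand the compact nodelist (e.g. node[01-02]) into one hostname per line
    scontrol show hostnames "$SLURM_JOB_NODELIST"

    # Start from the directory the job was submitted from
    cd "$SLURM_SUBMIT_DIR"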