diff --git a/content/post/2023-july-maintenance.md b/content/post/2023-july-maintenance.md
index 796eca261..bb839d305 100644
--- a/content/post/2023-july-maintenance.md
+++ b/content/post/2023-july-maintenance.md
@@ -22,7 +22,13 @@ All systems are expected to return to service by **6 a.m. on Wednesday, July 19*
 
 RC engineers will be installing a new `/scratch` storage filesystem that can be accessed at `/scratch/$USER` after the end of maintenance.
 
-**The current `/scratch` filesystem will be permanently retired on October 17, 2023 and all the data it contains will be deleted.** We have prepared a sample script for users who wish to transfer files to the new scratch system. Users should clean up their current `/scratch` directory in preparation, to minimize the load. The downloadable script will be posted here after maintenance.
+**Modified queue limits will be implemented to ensure maximum read/write performance of the new `/scratch` filesystem.** Users are encouraged to consult our [updated documentation](https://www.rc.virginia.edu/userinfo/rivanna/queues/) and adjust their job scripts accordingly.
+
+**The current `/scratch` filesystem will be permanently retired on October 17, 2023, and all the data it contains will be deleted.** We have prepared a sample script for users who wish to transfer files to the new scratch system. To minimize the transfer load, users should clean up their current `/scratch` directory beforehand. A sample script is posted below.
+
+**Example script to copy files**
+
+{{< pull-code file="/static/scripts/demo-copy-scratch.slurm" lang="bash" >}}
 
 The script will also be available through the Open OnDemand Job Composer:
 
diff --git a/layouts/shortcodes/code-download.html b/layouts/shortcodes/code-download.html
new file mode 100644
index 000000000..b7ca7aacb
--- /dev/null
+++ b/layouts/shortcodes/code-download.html
@@ -0,0 +1,7 @@
+{{ $file := .Get "file" }}
+{{ $lang := .Get "language" }}
+{{ $code := readFile $file }}
+{{ (print "```" $lang "\n" $code "\n```") | markdownify }}
+
+
+
diff --git a/static/scripts/abinit.slurm b/static/scripts/abinit.slurm
index b3c2d2806..ecb83828c 100644
--- a/static/scripts/abinit.slurm
+++ b/static/scripts/abinit.slurm
@@ -1,7 +1,7 @@
 #!/bin/bash
 #SBATCH --job-name=abinit
 #SBATCH -N 5
-#SBATCH --mem-per-cpu=6000
+#SBATCH --mem-per-cpu=9000
 #SBATCH --ntasks-per-node=20
 #SBATCH -t 10:00:00
 #SBATCH -p parallel
diff --git a/static/scripts/demo-copy-scratch.slurm b/static/scripts/demo-copy-scratch.slurm
new file mode 100644
index 000000000..c5ed6e80b
--- /dev/null
+++ b/static/scripts/demo-copy-scratch.slurm
@@ -0,0 +1,6 @@
+#!/bin/bash
+#SBATCH -A your_allocation    # to find your allocation, type "allocations"
+#SBATCH -t 12:00:00           # up to 7-00:00:00 (7 days)
+#SBATCH -p standard
+
+rsync -av /oldscratch/$USER/ /scratch/$USER
diff --git a/static/scripts/gaussian_serial.slurm b/static/scripts/gaussian_serial.slurm
index a41ae36be..96f855b37 100644
--- a/static/scripts/gaussian_serial.slurm
+++ b/static/scripts/gaussian_serial.slurm
@@ -2,7 +2,7 @@
 #SBATCH --tasks=1
 #SBATCH -t 160:00:00
 #SBATCH -p standard
-#SBATCH --mem=6000
+#SBATCH --mem=9000
 #SBATCH -A mygroup
 
 module load gaussian/g16
diff --git a/static/scripts/mpi_job.slurm b/static/scripts/mpi_job.slurm
index 7b8e0dda5..a8108e259 100644
--- a/static/scripts/mpi_job.slurm
+++ b/static/scripts/mpi_job.slurm
@@ -1,6 +1,6 @@
 #!/bin/bash
 #SBATCH --nodes=2
-#SBATCH --ntasks-per-node=16
+#SBATCH --ntasks-per-node=36
 #SBATCH --time=12:00:00
 #SBATCH --output=output_filename
 #SBATCH --partition=parallel
diff --git a/static/scripts/orca_multinode.slurm b/static/scripts/orca_multinode.slurm
index 3a1e83efb..b6d42c671 100644
--- a/static/scripts/orca_multinode.slurm
+++ b/static/scripts/orca_multinode.slurm
@@ -2,7 +2,7 @@
 #SBATCH -A mygroup              # your allocation account
 #SBATCH -p parallel             # partition
 #SBATCH -N 3                    # number of nodes
-#SBATCH --ntasks-per-node=40    # number of tasks
+#SBATCH --ntasks-per-node=36    # number of tasks
 #SBATCH -t 24:00:00             # time
 
 module purge
diff --git a/static/scripts/smrtlink_blasr.slurm b/static/scripts/smrtlink_blasr.slurm
index ee2d7c27a..5936b99f4 100644
--- a/static/scripts/smrtlink_blasr.slurm
+++ b/static/scripts/smrtlink_blasr.slurm
@@ -3,7 +3,7 @@
 #SBATCH --nodes=1
 #SBATCH --ntasks=1
 #SBATCH --cpus-per-task=8
-#SBATCH --mem-per-cpu=6000
+#SBATCH --mem-per-cpu=9000
 #SBATCH --time=06:00:00
 
 module purge
diff --git a/static/scripts/smrtlink_ngmlr.slurm b/static/scripts/smrtlink_ngmlr.slurm
index 665744815..74a3260e3 100644
--- a/static/scripts/smrtlink_ngmlr.slurm
+++ b/static/scripts/smrtlink_ngmlr.slurm
@@ -3,7 +3,7 @@
 #SBATCH --nodes=1
 #SBATCH --ntasks=1
 #SBATCH --cpus-per-task=8
-#SBATCH --mem-per-cpu=6000
+#SBATCH --mem-per-cpu=9000
 #SBATCH --time=06:00:00
 
 module purge
diff --git a/static/scripts/smrtlink_sawriter.slurm b/static/scripts/smrtlink_sawriter.slurm
index 08b5d4431..413ca32ea 100644
--- a/static/scripts/smrtlink_sawriter.slurm
+++ b/static/scripts/smrtlink_sawriter.slurm
@@ -3,7 +3,7 @@
 #SBATCH --nodes=1
 #SBATCH --ntasks=1
 #SBATCH --cpus-per-task=1    # multi-threading not supported
-#SBATCH --mem-per-cpu=6000
+#SBATCH --mem-per-cpu=9000
 #SBATCH --time=06:00:00
 
 module purge
diff --git a/static/scripts/spark_multinode.slurm b/static/scripts/spark_multinode.slurm
index 709e7ace1..1be1ad65e 100644
--- a/static/scripts/spark_multinode.slurm
+++ b/static/scripts/spark_multinode.slurm
@@ -3,7 +3,7 @@
 #SBATCH --exclusive    # do not modify
 #SBATCH -A myaccount   # your allocation
 #SBATCH -N 3           # number of nodes
-#SBATCH -c 40          # number of cores per node
+#SBATCH -c 36          # number of cores per node
 #SBATCH -t 3:00:00     # time
 
 module purge
diff --git a/static/scripts/vasp.slurm b/static/scripts/vasp.slurm
index c259c2bf5..dc2344c0e 100644
--- a/static/scripts/vasp.slurm
+++ b/static/scripts/vasp.slurm
@@ -1,7 +1,7 @@
 #!/bin/bash
 #SBATCH --account my_acct
 #SBATCH --nodes=8
-#SBATCH --ntasks-per-node=16
+#SBATCH --ntasks-per-node=36
 #SBATCH --time=3-00:00:00
 #SBATCH --output=thermo.out
 #SBATCH --partition=parallel