diff --git a/img/bandwidth_interfaces.png b/img/bandwidth_interfaces.png
new file mode 100644
index 0000000..857c76f
Binary files /dev/null and b/img/bandwidth_interfaces.png differ
diff --git a/img/memory_hierarchy.png b/img/memory_hierarchy.png
new file mode 100644
index 0000000..b3ab6c4
Binary files /dev/null and b/img/memory_hierarchy.png differ
diff --git a/index.html b/index.html
index 697d34b..c21715d 100644
--- a/index.html
+++ b/index.html
@@ -8,9 +8,9 @@
Log into bastion node (not necessary within SWC network)
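A minimal sketch of the login step; the bastion address is a placeholder, not the real hostname:
# <bastion-address> is a placeholder -- use the address from the SWC documentation
ssh <username>@<bastion-address>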
This node is fine for light work, but not for intensive analyses
@@ -479,7 +610,7 @@ List available modules
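For example, with the environment-modules/Lmod setup assumed here:
module avail            # list every module installed on the cluster
module avail cuda       # filter the list, e.g. for CUDA versions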
View a summary of the available resources
atyson@sgw2:~$ sinfo
PARTITION AVAIL TIMELIMIT NODES STATE NODELIST
cpu* up infinite 29 idle~ enc1-node[1,3-14],enc2-node[1-10,12-13],enc3-node[5-8]
@@ -615,7 +745,7 @@ SLURM commands
View currently running jobs (from everyone)
atyson@sgw2:~$ squeue
JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)
4036257 cpu bash imansd R 13-01:10:01 1 enc1-node2
@@ -651,7 +781,7 @@ Partitions
Interactive job
Start an interactive job (bash -i) in the cpu partition (-p cpu) in pseudoterminal mode (--pty).
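A typical invocation would be (the resource amounts are illustrative):
srun -p cpu -n 2 --mem 4G --pty bash -i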
Always start a job (interactive or batch) before doing anything intensive to spare the gateway node.
@@ -659,117 +789,117 @@ Interactive job
Run some “analysis”
Clone a test script
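Roughly as follows; the repository URL is hypothetical, use the one given in the course:
git clone https://github.com/<org>/slurm_demo.git   # hypothetical URL
cd slurm_demo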
Create conda environment
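A sketch, assuming the environment is built from scratch; the name matches the one activated in the batch scripts below, the rest is an assumption:
module load miniconda
conda create -n slurm_demo python -y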
Activate conda environment and run Python script
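For example, mirroring the call used in the batch script below:
conda activate slurm_demo
python multiply.py 5 10 --jazzy   # multiply 5 by 10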
Batch jobs
Check out batch script:
+
+#!/bin/bash
+
+#SBATCH -p gpu # partition (queue)
+#SBATCH -N 1 # number of nodes
+#SBATCH --mem 2G # memory pool for all cores
+#SBATCH -n 2 # number of cores
+#SBATCH -t 0-0:10 # time (D-HH:MM)
+#SBATCH -o slurm_output.out
+#SBATCH -e slurm_error.err
+#SBATCH --mail-type=ALL
+#SBATCH --mail-user=adam.tyson@ucl.ac.uk
+
+module load miniconda
+conda activate slurm_demo
+
+for i in {1..5}
+do
+ echo "Multiplying $i by 10"
+ python multiply.py $i 10 --jazzy
+done
Run batch job:
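Assuming the script above is saved as batch_example.sh (the filename is an assumption):
sbatch batch_example.sh
Standard output and error land in slurm_output.out and slurm_error.err, as set by the -o and -e directives.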
Array jobs
Check out array script:
+
+#!/bin/bash
+
+#SBATCH -p gpu # partition (queue)
+#SBATCH -N 1 # number of nodes
+#SBATCH --mem 2G # memory pool for all cores
+#SBATCH -n 2 # number of cores
+#SBATCH -t 0-0:10 # time (D-HH:MM)
+#SBATCH -o slurm_array_%A-%a.out
+#SBATCH -e slurm_array_%A-%a.err
+#SBATCH --mail-type=ALL
+#SBATCH --mail-user=adam.tyson@ucl.ac.uk
+#SBATCH --array=0-9%4
+
+# Array job runs 10 separate jobs, but not more than four at a time.
+# This is flexible and the array ID ($SLURM_ARRAY_TASK_ID) can be used in any way.
+
+module load miniconda
+conda activate slurm_demo
+
+echo "Multiplying $SLURM_ARRAY_TASK_ID by 10"
+python multiply.py $SLURM_ARRAY_TASK_ID 10 --jazzy
Run array job:
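Submitted the same way (the filename is an assumption):
sbatch array_example.sh
Each task writes its own slurm_array_<jobID>-<taskID>.out/.err, following the %A-%a pattern in the -o/-e directives.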
Using GPUs
Start an interactive job with one GPU:
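A minimal sketch using the generic GPU resource flag:
srun -p gpu --gres=gpu:1 --pty bash -i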
Load TensorFlow & CUDA
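The exact module names depend on what is installed; these are assumptions, so check module avail first:
module load cuda          # assumed module name
module load tensorflow    # assumed module name
nvidia-smi                # confirm the GPU is visible inside the job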
Useful commands
Cancel a job
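For example, using the job ID reported by squeue:
scancel 4036257      # cancel one job by ID
scancel -u $USER     # cancel all of your own jobs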
@@ -783,13 +913,14 @@ Modern behavioural analysis
sleap_train_slurm.sh
#!/bin/bash
+
+#SBATCH -J slp_train # job name
+#SBATCH -p gpu # partition (queue)
+#SBATCH -N 1 # number of nodes
+#SBATCH --mem 16G # memory pool for all cores
+#SBATCH -n 4 # number of cores
+#SBATCH -t 0-06:00 # time (D-HH:MM)
+#SBATCH --gres gpu:1 # request 1 GPU (of any kind)
+#SBATCH -o slurm.%x.%N.%j.out # STDOUT
+#SBATCH -e slurm.%x.%N.%j.err # STDERR
+#SBATCH --mail-type=ALL
+#SBATCH --mail-user=user@domain.com
+
+# Load the SLEAP module
+module load SLEAP
+
+# Define the directory of the exported training job package
+SLP_JOB_NAME=labels.v001.slp.training_job
+SLP_JOB_DIR=/ceph/scratch/$USER/$SLP_JOB_NAME
+
+# Go to the job directory
+cd $SLP_JOB_DIR
+
+# Run the training script generated by SLEAP
+./train-script.sh
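From the directory containing the script, submit it with sbatch; the inference scripts further down are submitted the same way:
sbatch sleap_train_slurm.sh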
View the status of your queued/running jobs with squeue --me
View status of running/completed jobs with sacct:
sacct
+
+JobID JobName Partition Account AllocCPUS State ExitCode
+------------ ---------- ---------- ---------- ---------- ---------- --------
+4232289 slp_train gpu swc-ac 4 RUNNING 0:0
+4232289.bat+ batch swc-ac 4 RUNNING 0:0
Run sacct with some more helpful arguments, e.g. view jobs from the last 24 hours, incl. time elapsed and peak memory usage in KB (MaxRSS):
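One way to do this (the exact format fields are a suggestion):
sacct --starttime $(date -d '24 hours ago' +%Y-%m-%dT%H:%M:%S) \
      --format=JobID,JobName,Partition,State,Elapsed,MaxRSS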
While you wait for the training job to finish, you can copy and inspect the trained models from a previous run:
cp -R /ceph/scratch/sirmpilatzen/labels.v001.slp.training_job/models /ceph/scratch/$USER/labels.v001.slp.training_job/
+cd /ceph/scratch/$USER/labels.v001.slp.training_job/models
+ls
sleap_inference_slurm.sh
#!/bin/bash
+
+#SBATCH -J slp_infer # job name
+#SBATCH -p gpu # partition
+#SBATCH -N 1 # number of nodes
+#SBATCH --mem 64G # memory pool for all cores
+#SBATCH -n 32 # number of cores
+#SBATCH -t 0-01:00 # time (D-HH:MM)
+#SBATCH --gres gpu:rtx5000:1 # request 1 RTX5000 GPU
+#SBATCH -o slurm.%x.%N.%j.out # write STDOUT
+#SBATCH -e slurm.%x.%N.%j.err # write STDERR
+#SBATCH --mail-type=ALL
+#SBATCH --mail-user=user@domain.com
+
+# Load the SLEAP module
+module load SLEAP
+
+# Define directories for exported SLEAP job package and videos
+SLP_JOB_NAME=labels.v001.slp.training_job
+SLP_JOB_DIR=/ceph/scratch/$USER/$SLP_JOB_NAME
+VIDEO_DIR=/ceph/scratch/neuroinformatics-dropoff/SLEAP_HPC_test_data/course-hpc-2023/videos
+VIDEO1_PREFIX=sub-01_ses-01_task-EPM_time-165049
+
+# Go to the job directory
+cd $SLP_JOB_DIR
+
+# Make a directory to store the predictions
+mkdir -p predictions
+
+# Run the inference command
+sleap-track $VIDEO_DIR/${VIDEO1_PREFIX}_video.mp4 \
+ -m $SLP_JOB_DIR/models/231130_160757.centroid/training_config.json \
+ -m $SLP_JOB_DIR/models/231130_160757.centered_instance/training_config.json \
+ -o $SLP_JOB_DIR/predictions/${VIDEO1_PREFIX}_predictions.slp \
+ --gpu auto \
+ --no-empty-frames
sleap_inference_slurm_array.sh
#!/bin/bash
+
+#SBATCH -J slp_infer # job name
+#SBATCH -p gpu # partition
+#SBATCH -N 1 # number of nodes
+#SBATCH --mem 64G # memory pool for all cores
+#SBATCH -n 32 # number of cores
+#SBATCH -t 0-01:00 # time (D-HH:MM)
+#SBATCH --gres gpu:rtx5000:1 # request 1 RTX5000 GPU
+#SBATCH -o slurm.%x.%N.%j.out # write STDOUT
+#SBATCH -e slurm.%x.%N.%j.err # write STDERR
+#SBATCH --mail-type=ALL
+#SBATCH --mail-user=user@domain.com
+#SBATCH --array=1-2
+
+# Load the SLEAP module
+module load SLEAP
+
+# Define directories for exported SLEAP job package and videos
+SLP_JOB_NAME=labels.v001.slp.training_job
+SLP_JOB_DIR=/ceph/scratch/$USER/$SLP_JOB_NAME
+VIDEO_DIR=/ceph/scratch/neuroinformatics-dropoff/SLEAP_HPC_test_data/course-hpc-2023/videos
+
+VIDEO1_PREFIX=sub-01_ses-01_task-EPM_time-165049
+VIDEO2_PREFIX=sub-02_ses-01_task-EPM_time-185651
+VIDEOS_PREFIXES=($VIDEO1_PREFIX $VIDEO2_PREFIX)
+CURRENT_VIDEO_PREFIX=${VIDEOS_PREFIXES[$SLURM_ARRAY_TASK_ID - 1]}
+echo "Current video prefix: $CURRENT_VIDEO_PREFIX"
+
+# Go to the job directory
+cd $SLP_JOB_DIR
+
+# Make a directory to store the predictions
+mkdir -p predictions
+
+# Run the inference command
+sleap-track $VIDEO_DIR/${CURRENT_VIDEO_PREFIX}_video.mp4 \
+ -m $SLP_JOB_DIR/models/231130_160757.centroid/training_config.json \
+ -m $SLP_JOB_DIR/models/231130_160757.centered_instance/training_config.json \
+ -o $SLP_JOB_DIR/predictions/${CURRENT_VIDEO_PREFIX}_array_predictions.slp \
+ --gpu auto \
+ --no-empty-frames
-SWC | 2023-12-04
+SWC | 2024-10-04