diff --git a/Golden_Repo/p/ParMETIS/ParMETIS-4.0.3-gompi-2022a.eb b/Golden_Repo/p/ParMETIS/ParMETIS-4.0.3-gompi-2022a.eb
new file mode 100644
index 0000000000000000000000000000000000000000..28a972bcf25ac120c63601b0b7c331100869d687
--- /dev/null
+++ b/Golden_Repo/p/ParMETIS/ParMETIS-4.0.3-gompi-2022a.eb
@@ -0,0 +1,42 @@
+name = 'ParMETIS'
+
+version = '4.0.3'
+
+homepage = 'http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview'
+
+description = """ParMETIS is an MPI-based parallel library that implements a variety of algorithms for partitioning
+unstructured graphs, meshes, and for computing fill-reducing orderings of sparse matrices. ParMETIS extends the
+functionality provided by METIS and includes routines that are especially suited for parallel AMR computations and large
+scale numerical simulations. The algorithms implemented in ParMETIS are based on the parallel multilevel k-way
+graph-partitioning, adaptive repartitioning, and parallel multi-constrained partitioning schemes.
+"""
+
+toolchain = {'name': 'gompi', 'version': '2022a'}
+
+toolchainopts = {'optarch': True, 'usempi': True, 'pic': True, 'openmp': True}
+
+source_urls = ['http://glaros.dtc.umn.edu/gkhome/fetch/sw/parmetis']
+
+sources = [SOURCELOWER_TAR_GZ]
+
+patches = [
+    # Needed for Elemental
+    'parmetis_computevertexseparator.patch'
+]
+
+checksums = [
+    'f2d9a231b7cf97f1fee6e8c9663113ebf6c240d407d3c118c55b3633d6be6e5f',  # parmetis-4.0.3.tar.gz
+    'b82f5e869b971b5e49566091a79783cc267276bcddcd939abf2240f415287fa7',  # parmetis_computevertexseparator.patch
+]
+
+builddependencies = [
+    ('CMake', '3.23.1')
+]
+
+modextravars = {
+    'PARMETIS_ROOT': '%(installdir)s',
+    'PARMETIS_LIB': '%(installdir)s/lib',
+    'PARMETIS_INCLUDE': '%(installdir)s/include'
+}
+
+moduleclass = 'math'
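
The modextravars block above exports PARMETIS_ROOT, PARMETIS_LIB, and PARMETIS_INCLUDE whenever the module is loaded. A minimal sketch of how a downstream build could consume them, using a hypothetical check.c and the version macros that parmetis.h defines (illustration only, not part of this diff):

/* check.c -- compile with the module loaded, e.g.
 *   mpicc check.c -I$PARMETIS_INCLUDE -L$PARMETIS_LIB -lparmetis -lmetis
 */
#include <stdio.h>
#include <parmetis.h>   /* pulls in mpi.h and metis.h */

int main(void)
{
    /* Version macros defined by parmetis.h. */
    printf("ParMETIS %d.%d.%d\n", PARMETIS_MAJOR_VERSION,
           PARMETIS_MINOR_VERSION, PARMETIS_SUBMINOR_VERSION);
    return 0;
}
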
diff --git a/Golden_Repo/p/ParMETIS/parmetis-4.0.3-double.patch b/Golden_Repo/p/ParMETIS/parmetis-4.0.3-double.patch
new file mode 100644
index 0000000000000000000000000000000000000000..bf6f1c5a889fa3526873dd8a000bbffbda800cb8
--- /dev/null
+++ b/Golden_Repo/p/ParMETIS/parmetis-4.0.3-double.patch
@@ -0,0 +1,11 @@
+--- parmetis-4.0.3/metis/include/metis.h.orig	2013-03-30 17:24:50.000000000 +0100
++++ parmetis-4.0.3/metis/include/metis.h	2016-04-20 11:07:49.485844000 +0200
+@@ -40,7 +40,7 @@
+    32 : single precission floating point (float)
+    64 : double precission floating point (double)
+ --------------------------------------------------------------------------*/
+-#define REALTYPEWIDTH 32
++#define REALTYPEWIDTH 64
+ 
+ 
+ 
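
This one-line change flips METIS's REALTYPEWIDTH from 32 to 64, which makes the width-selected typedef in metis.h resolve real_t to double, so every real-valued METIS/ParMETIS argument (tpwgts, ubvec, and so on) becomes double precision. A minimal sketch of the effect, assuming the patched metis.h is on the include path (illustration only, not part of the patch):

#include <assert.h>
#include <metis.h>

int main(void)
{
    /* With REALTYPEWIDTH 64 patched in, metis.h typedefs real_t as double. */
    assert(sizeof(real_t) == sizeof(double));
    return 0;
}
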
diff --git a/Golden_Repo/p/ParMETIS/parmetis_computevertexseparator.patch b/Golden_Repo/p/ParMETIS/parmetis_computevertexseparator.patch
new file mode 100644
index 0000000000000000000000000000000000000000..adbc37c8510b4deeb44df2e2d1fd3652a257621a
--- /dev/null
+++ b/Golden_Repo/p/ParMETIS/parmetis_computevertexseparator.patch
@@ -0,0 +1,186 @@
+diff -ruN parmetis-4.0.3.old/include/parmetis.h parmetis-4.0.3/include/parmetis.h
+--- parmetis-4.0.3.old/include/parmetis.h	2017-04-05 17:20:11.888709466 +0200
++++ parmetis-4.0.3/include/parmetis.h	2017-04-05 17:21:38.247478696 +0200
+@@ -113,6 +113,12 @@
+              idx_t *vtxdist, idx_t *xadj, idx_t *adjncy, idx_t *numflag, 
+              idx_t *options, idx_t *order, idx_t *sizes, MPI_Comm *comm);
+ 
++void ParMETIS_ComputeVertexSeparator(
++             idx_t *vtxdist, idx_t *xadj, idx_t *adjncy,
++             idx_t *p_nseps, idx_t *s_nseps,
++             real_t *ubfrac, idx_t *idbglvl, idx_t *order, idx_t *sizes,
++             MPI_Comm *comm);
++
+ #ifdef __cplusplus
+ }
+ #endif
+diff -ruN parmetis-4.0.3.old/libparmetis/ComputeVertexSeparator.c parmetis-4.0.3/libparmetis/ComputeVertexSeparator.c
+--- parmetis-4.0.3.old/libparmetis/ComputeVertexSeparator.c	1970-01-01 01:00:00.000000000 +0100
++++ parmetis-4.0.3/libparmetis/ComputeVertexSeparator.c	2017-04-05 17:22:32.477589755 +0200
+@@ -0,0 +1,166 @@
++/*
++ * Copyright 1997, Regents of the University of Minnesota
++ * Created by modifying ParMETIS routines by Jack Poulson, 2012-2015
++ */
++#include <parmetislib.h>
++
++void ElParallelLabelVertices
++( ctrl_t *ctrl, graph_t *graph, idx_t *order, idx_t *sizes )
++{ 
++  idx_t i, j, nvtxs, id; 
++  idx_t *where, *lpwgts, *gpwgts;
++  idx_t sizescan[3];
++
++  nvtxs  = graph->nvtxs;
++  where  = graph->where;
++  lpwgts = graph->lpwgts;
++  gpwgts = graph->gpwgts;
++
++  /* Compute the local sizes of the left side, right side, and separator */
++  iset(3, 0, lpwgts);
++  for (i=0; i<nvtxs; i++) 
++      lpwgts[where[i]]++;
++
++  /* Perform a Prefix scan of the separator size to determine the boundaries */
++  gkMPI_Scan((void *)lpwgts, (void *)sizescan, 3, IDX_T, MPI_SUM, ctrl->comm);
++  gkMPI_Allreduce
++  ((void *)lpwgts, (void *)gpwgts, 3, IDX_T, MPI_SUM, ctrl->comm);
++
++  /* Fill in the size of the partition */
++  sizes[0] = gpwgts[0];
++  sizes[1] = gpwgts[1];
++  sizes[2] = gpwgts[2];
++
++  for( i=2; i>=0; --i )
++      for( j=i+1; j<3; ++j )
++          sizescan[i] += gpwgts[j];
++  for( i=0; i<3; i++ )
++      sizescan[i] -= lpwgts[i];
++
++  for( i=0; i<nvtxs; i++ ) 
++  {
++      id = where[i];
++      PASSERT(ctrl, id <= 2);
++      sizescan[id]++;
++      PASSERT(ctrl, order[i] == -1);
++      order[i] = graph->gnvtxs - sizescan[id];
++  }
++}
++
++void ElParallelOrder
++( ctrl_t *ctrl, graph_t *graph, idx_t *order, idx_t *sizes )
++{
++  idx_t i, nvtxs;
++
++  nvtxs = graph->nvtxs;
++  iset(nvtxs, -1, order);
++
++  /* graph->where = ismalloc(nvtxs, 0, "ElOrder: graph->where"); */
++  /* If we computed an initial partition with Global_Partition, then we 
++     should run the following instead of the above ismalloc of graph->where*/
++  iset(nvtxs, 0, graph->where); 
++  gk_free((void **)&graph->match, 
++          (void **)&graph->cmap, 
++          (void **)&graph->rlens, 
++          (void **)&graph->slens, 
++          (void **)&graph->rcand, LTERM);
++
++  Order_Partition_Multiple(ctrl, graph);
++
++  ElParallelLabelVertices(ctrl, graph, order, sizes);
++}
++
++void ParMETIS_ComputeVertexSeparator
++( idx_t *vtxdist, idx_t *xadj, idx_t *adjncy, 
++  idx_t *p_nseps, idx_t *s_nseps, 
++  real_t *ubfrac, idx_t *idbglvl, idx_t *order, idx_t *sizes, 
++  MPI_Comm *comm )
++{
++  idx_t i, j, npes, npesNonzero, mype, mypeNonzero, dbglvl, status, haveData;
++  ctrl_t *ctrl;
++  graph_t *graph;
++  MPI_Comm nonzeroComm, nullComm;
++  size_t curmem;
++
++  gkMPI_Comm_size(*comm, &npes);
++  gkMPI_Comm_rank(*comm, &mype);
++
++  if( vtxdist[npes] == 0 )
++  {
++      sizes[0] = 0;
++      sizes[1] = 0;
++      sizes[2] = 0;
++      return;
++  }
++
++  haveData = ( vtxdist[mype+1]-vtxdist[mype] != 0 );
++  if( haveData )
++      gkMPI_Comm_split(*comm, 1, mype, &nonzeroComm);
++  else
++      gkMPI_Comm_split(*comm, MPI_UNDEFINED, 0, &nullComm);
++
++  if( !haveData )
++  {
++      sizes[0] = sizes[1] = sizes[2] = 0;
++      gkMPI_Allreduce(MPI_IN_PLACE, (void *)sizes, 3, IDX_T, MPI_SUM, *comm);
++      return;
++  }
++
++  gkMPI_Comm_size(nonzeroComm, &npesNonzero);
++  gkMPI_Comm_rank(nonzeroComm, &mypeNonzero);
++
++  /* Compress the vtxdist data to make it match the new communicator */
++  j=0;
++  for( i=1; i<npes+1; ++i )
++      if( vtxdist[i] != vtxdist[j] )
++          vtxdist[++j] = vtxdist[i];
++
++  status = METIS_OK;
++  gk_malloc_init();
++  curmem = gk_GetCurMemoryUsed();
++
++  ctrl = SetupCtrl(PARMETIS_OP_KMETIS, NULL, 1, 2, NULL, NULL, nonzeroComm);
++
++  dbglvl = (idbglvl == NULL ? 0 : *idbglvl);
++  ctrl->dbglvl = dbglvl;
++
++  graph = SetupGraph(ctrl, 1, vtxdist, xadj, NULL, NULL, adjncy, NULL, 0);
++  AllocateWSpace(ctrl, 10*graph->nvtxs);
++
++  /* Compute an initial partition: for some reason this improves the quality */
++  ctrl->CoarsenTo = gk_min(vtxdist[npesNonzero]+1, 
++                           200*gk_max(npesNonzero,ctrl->nparts));
++  Global_Partition(ctrl, graph); 
++
++  /* Compute an ordering */
++  ctrl->optype    = PARMETIS_OP_OMETIS;
++  ctrl->partType  = ORDER_PARTITION;
++  ctrl->mtype     = PARMETIS_MTYPE_GLOBAL;
++  ctrl->rtype     = PARMETIS_SRTYPE_2PHASE;
++  ctrl->p_nseps   = (p_nseps  == NULL ? 1 : *p_nseps);
++  ctrl->s_nseps   = (s_nseps  == NULL ? 1 : *s_nseps);
++  ctrl->ubfrac    = (ubfrac == NULL ? ORDER_UNBALANCE_FRACTION : *ubfrac);
++  ctrl->dbglvl    = dbglvl;
++  ctrl->ipart     = ISEP_NODE;
++  ctrl->CoarsenTo = gk_min(graph->gnvtxs-1,1500*npesNonzero); 
++  ElParallelOrder(ctrl, graph, order, sizes);
++
++  FreeInitialGraphAndRemap(graph);
++
++  /* Pass the data to the early-exiting processes with an allreduce */
++  if( mypeNonzero != 0 )
++      sizes[0] = sizes[1] = sizes[2] = 0;
++  gkMPI_Allreduce(MPI_IN_PLACE, (void*)sizes, 3, IDX_T, MPI_SUM, *comm);
++
++  MPI_Comm_free( &nonzeroComm );
++
++  goto DONE;
++
++DONE:
++  FreeCtrl(&ctrl);
++  if (gk_GetCurMemoryUsed() - curmem > 0) {
++    printf("ParMETIS appears to have a memory leak of %zdbytes. Report this.\n",
++        (ssize_t)(gk_GetCurMemoryUsed() - curmem));
++  }
++  gk_malloc_cleanup(0);
++}
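
The patch above exposes ParMETIS_ComputeVertexSeparator, which takes a distributed CSR graph (vtxdist/xadj/adjncy), computes a vertex separator, and returns a global relabeling in order plus the sizes of the left part, right part, and separator in sizes. A minimal sketch of a caller, assuming a path graph with two vertices per MPI rank and the defaults selected by the NULL arguments (untested illustration, not part of this diff):

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <parmetis.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int npes, mype;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Comm_size(comm, &npes);
    MPI_Comm_rank(comm, &mype);

    /* Two vertices per rank; vertices vtxdist[p]..vtxdist[p+1]-1 live on rank p. */
    idx_t nlocal = 2;
    idx_t *vtxdist = malloc((npes + 1) * sizeof(idx_t));
    for (int p = 0; p <= npes; p++)
        vtxdist[p] = 2 * p;

    /* Local CSR slice of the global path graph 0-1-2-...-(2*npes-1). */
    idx_t xadj[3], adjncy[4];
    idx_t nedges = 0;
    xadj[0] = 0;
    for (idx_t i = 0; i < nlocal; i++) {
        idx_t gv = vtxdist[mype] + i;
        if (gv > 0)                  adjncy[nedges++] = gv - 1;
        if (gv < vtxdist[npes] - 1)  adjncy[nedges++] = gv + 1;
        xadj[i + 1] = nedges;
    }

    idx_t order[2], sizes[3];
    /* NULLs select the defaults shown in the patch: one separator per
       phase, ORDER_UNBALANCE_FRACTION, and no debug output. */
    ParMETIS_ComputeVertexSeparator(vtxdist, xadj, adjncy,
                                    NULL, NULL, NULL, NULL,
                                    order, sizes, &comm);

    if (mype == 0)
        printf("left %d, right %d, separator %d\n",
               (int)sizes[0], (int)sizes[1], (int)sizes[2]);

    free(vtxdist);
    MPI_Finalize();
    return 0;
}
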
diff --git a/acls.yml b/acls.yml
index 3c4685419d4f944d879d4da4d70a9808771d34fa..efeed3065112d8ffdf4955f9963e08fbdc52881c 100644
--- a/acls.yml
+++ b/acls.yml
@@ -1123,7 +1123,7 @@ software:
     owner: 'mathprod'
     mpi: True
   - name: 'ParMETIS'
-    owner: 'mathprod'
+    owner: 'schoebel1'
     mpi: True
   - name: 'Elemental'
     owner: 'mathprod'