|
# {{{ Distributed-memory functionality |
|
|
|
@memoize_on_first_arg
def connected_ranks(dcoll: DiscretizationCollection):
    """Return the remote ranks whose mesh partitions neighbor *dcoll*'s
    volume mesh partition. Memoized on the discretization collection.
    """
    from meshmode.distributed import get_connected_partitions

    volume_mesh = dcoll._volume_discr.mesh
    return get_connected_partitions(volume_mesh)
|
|
|
|
|
class _RankBoundaryCommunication:
    """Handles one-to-one MPI exchange of boundary data with a single
    neighboring rank.

    Communication is split in two phases: the constructor initiates the
    (non-blocking) send and receive, and :meth:`finish` completes them and
    returns the resulting :class:`TracePair`. This allows callers to start
    several exchanges before waiting on any of them.
    """

    # Offset added to any user-supplied tag to keep these messages in a
    # dedicated tag range. (Arbitrary constant; presumably chosen to avoid
    # collisions with other tags in use — TODO confirm.)
    base_tag = 1273

    def __init__(self, dcoll: DiscretizationCollection,
                 remote_rank, vol_field, tag=None):
        # Derive the actual MPI tag: base_tag, optionally offset by the
        # caller-provided tag.
        self.tag = self.base_tag
        if tag is not None:
            self.tag += tag

        self.dcoll = dcoll
        self.array_context = vol_field.array_context
        self.remote_btag = BTAG_PARTITION(remote_rank)
        self.bdry_discr = dcoll.discr_from_dd(self.remote_btag)

        from grudge.op import project

        # Restrict the volume field to the faces on the partition boundary
        # shared with *remote_rank*.
        self.local_dof_array = project(dcoll, "vol", self.remote_btag, vol_field)

        # MPI operates on host-side numpy buffers, so flatten and transfer
        # the boundary data off the array context's device.
        local_data = self.array_context.to_numpy(flatten(self.local_dof_array))
        comm = self.dcoll.mpi_communicator

        # Initiate non-blocking send and receive; both are completed in
        # finish(). The receive buffer mirrors the send buffer's size/dtype,
        # which assumes the neighbor's shared boundary has matching layout.
        self.send_req = comm.Isend(local_data, remote_rank, tag=self.tag)
        self.remote_data_host = np.empty_like(local_data)
        self.recv_req = comm.Irecv(self.remote_data_host, remote_rank, self.tag)

    def finish(self):
        """Complete the exchange started in the constructor.

        :returns: a :class:`TracePair` on the partition boundary with the
            locally-owned data as *interior* and the neighbor's data as
            *exterior*.
        """
        # Block until the neighbor's data has arrived.
        self.recv_req.Wait()

        # Move the received host buffer back onto the device and restore the
        # boundary discretization's DOF-array structure.
        actx = self.array_context
        remote_dof_array = unflatten(
            self.array_context, self.bdry_discr,
            actx.from_numpy(self.remote_data_host)
        )

        # The neighbor's face data is ordered according to *its* boundary
        # discretization; apply the swap connection to permute it into this
        # rank's boundary ordering.
        bdry_conn = self.dcoll.distributed_boundary_swap_connection(
            dof_desc.as_dofdesc(dof_desc.DTAG_BOUNDARY(self.remote_btag))
        )
        swapped_remote_dof_array = bdry_conn(remote_dof_array)

        # Ensure the outgoing buffer is no longer in use before returning.
        self.send_req.Wait()

        return TracePair(self.remote_btag,
                         interior=self.local_dof_array,
                         exterior=swapped_remote_dof_array)
|
|
|
|
|
def _cross_rank_trace_pairs_scalar_field(
        dcoll: DiscretizationCollection, vec, tag=None) -> list:
    """Exchange a single scalar field across all partition boundaries.

    :returns: a :class:`list` with one :class:`TracePair` per connected rank.
    """
    ranks = connected_ranks(dcoll)

    if isinstance(vec, Number):
        # A plain number is identical on every rank, so no communication is
        # needed: both sides of each trace pair carry the same value.
        return [
            TracePair(BTAG_PARTITION(rank), interior=vec, exterior=vec)
            for rank in ranks
        ]

    # Initiate every exchange before completing any of them.
    comms = []
    for rank in ranks:
        comms.append(_RankBoundaryCommunication(dcoll, rank, vec, tag=tag))
    return [comm.finish() for comm in comms]
|
|
|
|
|
def cross_rank_trace_pairs(
        dcoll: DiscretizationCollection, ary, tag=None) -> list:
    r"""Get a :class:`list` of *ary* trace pairs for each partition boundary.

    For each partition boundary, the field data values in *ary* are
    communicated to/from the neighboring partition. Presumably, this
    communication is MPI (but strictly speaking, may not be, and this
    routine is agnostic to the underlying communication).

    For each face on each partition boundary, a
    :class:`TracePair` is created with the locally, and
    remotely owned partition boundary face data as the `internal`, and `external`
    components, respectively. Each of the TracePair components are structured
    like *ary*.

    :arg ary: a single :class:`~meshmode.dof_array.DOFArray`, or an object
        array of :class:`~meshmode.dof_array.DOFArray`\ s
        of arbitrary shape.
    :arg tag: an optional integer offset added to the base communication tag.
    :returns: a :class:`list` of :class:`TracePair` objects.
    """
    if isinstance(ary, np.ndarray):
        # Object array: exchange each component separately, then reassemble
        # the trace pairs into arrays of the original shape.
        oshape = ary.shape
        comm_vec = ary.flatten()

        n, = comm_vec.shape
        result = {}
        # FIXME: Batch this communication rather than
        # doing it in sequence.
        for ivec in range(n):
            # NOTE: forward *tag* so the caller's tag is honored here just as
            # in the scalar path below. Each component's exchange completes
            # before the next starts, so reusing one tag per component is
            # unambiguous.
            for rank_tpair in _cross_rank_trace_pairs_scalar_field(
                    dcoll, comm_vec[ivec], tag=tag):
                assert isinstance(rank_tpair.dd.domain_tag, dof_desc.DTAG_BOUNDARY)
                assert isinstance(rank_tpair.dd.domain_tag.tag, BTAG_PARTITION)
                result[rank_tpair.dd.domain_tag.tag.part_nr, ivec] = rank_tpair

        return [
            TracePair(
                dd=dof_desc.as_dofdesc(
                    dof_desc.DTAG_BOUNDARY(BTAG_PARTITION(remote_rank))),
                interior=make_obj_array([
                    result[remote_rank, i].int for i in range(n)]).reshape(oshape),
                exterior=make_obj_array([
                    result[remote_rank, i].ext for i in range(n)]).reshape(oshape)
            ) for remote_rank in connected_ranks(dcoll)
        ]
    else:
        return _cross_rank_trace_pairs_scalar_field(dcoll, ary, tag=tag)
|
|
|
# }}} |