write_1d_integer_data_phdf5 Subroutine

private subroutine write_1d_integer_data_phdf5(wrt_buf, dataset_name, grp_id)

Writes contiguous 1D integer data distributed over MPI ranks into an archive in parallel. The routine assumes that the data vary across MPI ranks only in the final (here: the only) dimension, i.e. each rank holds a contiguous block whose length may differ from rank to rank and may be zero.

Arguments

Type    Intent    Optional    Attributes    Name
integer, intent(in), dimension(:) :: wrt_buf

Data to be written

character(len=*), intent(in) :: dataset_name

Name of dataset

integer(kind=hid_t), intent(in) :: grp_id

ID of group that dataset should be written into
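
Example

A minimal call sketch; my_data, its rank-local length n_local and the open
group handle grp_id are hypothetical placeholders for whatever the calling
code provides. Every rank has to take part in the call, because the write is
collective; ranks holding no data simply pass a zero-length buffer.

    integer, allocatable :: my_data(:)
    integer(kind=hid_t) :: grp_id      ! an already created/opened HDF5 group

    allocate(my_data(n_local))         ! n_local may differ from rank to rank
    call write_1d_integer_data_phdf5(my_data, 'my_dataset', grp_id)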


Source Code

        subroutine write_1d_integer_data_phdf5(wrt_buf, dataset_name, grp_id)
            !> Data to be written
            integer, intent(in), dimension(:) :: wrt_buf
            !> Name of dataset
            character(len=*), intent(in) :: dataset_name
#ifndef USE_HDF_
            integer, intent(in) :: grp_id
            character(*), parameter :: this_routine = 'write_1d_integer_data_phdf5'
#endif
#ifdef USE_HDF_
            !> ID of group that dataset should be written into
            integer(hid_t), intent(in) :: grp_id
            !> File and memory dataspace, dataset and property-list handles,
            !> and the rank of the data to be written
            integer(hid_t) :: filespace, memspace, dset_id, plist_id
            integer :: rank
            !> dimension of dataset to be written, block size during writing and write offset
            integer(hsize_t) :: dimsf(1), count(1), offset(1)
            !> HDF5 error code
            integer(hdf_err) :: err
            !> total length, i.e. sum of number of data on each MPI rank
            !> List containing length of distributed data on each MPI rank
            !> MPI error code
            integer :: tot_len_data, list_len_data(nProcessors), mpierr, data_shape(1)

            data_shape = shape(wrt_buf)
            ! inquire about the size of the last dimension on each MPI rank and
            ! their sum which is required for mapping out the disk space.
            call MPIAllGather(data_shape(size(data_shape)), list_len_data, mpierr)
            call MPIAllReduce(data_shape(size(data_shape)), MPI_SUM, tot_len_data)
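            ! For example, if three ranks hold 4, 0 and 6 elements respectively,
            ! then list_len_data = [4, 0, 6] and tot_len_data = 10.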
            dimsf = [tot_len_data]
            rank = 1
            ! rank and length of dataspace
            call h5screate_simple_f(rank, dimsf, filespace, err)
            ! create the corresponding dataset and get dset_id
            call h5dcreate_f(grp_id, dataset_name, H5T_NATIVE_INTEGER, &
                             filespace, dset_id, err)
            ! each rank writes a block of size "count" from memory; map out
            ! the memspace accordingly
            count = shape(wrt_buf)
            call h5screate_simple_f(rank, count, memspace, err)

            ! Each rank writes its block at an offset equal to the summed
            ! lengths of the blocks held by all lower ranks; e.g. for
            ! rank-local lengths of 4, 0 and 6, the offsets are 0, 4 and 4.
            ! This also ensures that the ordering of different data sets can
            ! be correlated.
            if (iProcIndex == 0) then
                offset = 0 * data_shape
            else
                offset = [sum(list_len_data(1:iProcIndex))]
            end if

            ! set I/O pattern to "collective"
            call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, err)
            call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, err)
            ! In HDF5 collective I/O mode all collective operations have to be
            ! called the same number of times by each MPI rank; however, this call
            ! may happen asynchronously. On some MPI ranks there might be no data,
            ! then file- and memspace have to be selected as none.
            if (data_shape(size(data_shape)) == 0) then
                call h5sselect_none_f(filespace, err)
                call h5sselect_none_f(memspace, err)
            else
                call h5sselect_hyperslab_f(filespace, H5S_SELECT_SET_F, offset, &
                                           count, err)
            end if
            call h5dwrite_f(dset_id, H5T_NATIVE_INTEGER, wrt_buf, count, err, &
                            file_space_id=filespace, mem_space_id=memspace, &
                            xfer_prp=plist_id)

            call h5pclose_f(plist_id, err)
            call h5sclose_f(filespace, err)
            call h5sclose_f(memspace, err)
            call h5dclose_f(dset_id, err)
#else
            call stop_all(this_routine, 'HDF5 support not enabled at compile time')
            unused_var(wrt_buf)
            unused_var(dataset_name)
            unused_var(grp_id)
#endif
        end subroutine write_1d_integer_data_phdf5
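
The routine expects grp_id to refer to a group in a file that was created or
opened for parallel access via an MPI-IO file access property list, since the
write is collective. The sketch below shows one way such a caller might look;
it is illustrative only, and the file name, group and dataset names, the data
lengths and the module name parallel_hdf5_utils are assumptions made for
illustration rather than part of the documented source:

    program example_write_archive
        use mpi
        use hdf5
        ! Module name assumed for illustration; use whichever module actually
        ! provides write_1d_integer_data_phdf5 in the code base at hand.
        use parallel_hdf5_utils, only: write_1d_integer_data_phdf5
        implicit none

        integer :: mpierr, my_rank, i, err
        integer(hid_t) :: plist_id, file_id, grp_id
        integer, allocatable :: my_data(:)

        call MPI_Init(mpierr)
        call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, mpierr)

        ! rank-local data whose length differs between ranks
        allocate(my_data(my_rank + 1))
        my_data = [(i, i = 1, my_rank + 1)]

        call h5open_f(err)
        ! create the file collectively with an MPI-IO file access property list
        call h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, err)
        call h5pset_fapl_mpio_f(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL, err)
        call h5fcreate_f('archive.h5', H5F_ACC_TRUNC_F, file_id, err, &
                         access_prp=plist_id)
        call h5pclose_f(plist_id, err)
        call h5gcreate_f(file_id, 'my_group', grp_id, err)

        ! collective write of the distributed 1D buffers in rank order
        call write_1d_integer_data_phdf5(my_data, 'my_dataset', grp_id)

        call h5gclose_f(grp_id, err)
        call h5fclose_f(file_id, err)
        call h5close_f(err)
        call MPI_Finalize(mpierr)
    end program example_write_archive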