loop_file Subroutine

private subroutine loop_file(this, lMat)

Apply the read_op_hdf5 of an lMat to the data in the currently opened file. The file is read chunkwise and the read_op_hdf5 operation is applied per chunk. The lMat argument is the object into which the data is read.
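
Since loop_file is private, it is driven from inside the module. The following is a minimal sketch of such a driver, purely for illustration; open_file and close_file are assumed (hypothetical) helper names and are not part of the documented interface:

        ! hypothetical driver routine, for illustration only
        subroutine read_dumpfile(reader, lMat, filename)
            type(lMat_hdf5_read_t), intent(inout) :: reader
            class(lMat_t), intent(inout) :: lMat
            character(*), intent(in) :: filename

            call reader%open_file(filename)  ! assumed helper: opens ds_vals/ds_inds and sets the offsets
            call reader%loop_file(lMat)      ! chunkwise read, applying lMat%read_op_hdf5 per chunk
            call reader%close_file()         ! assumed helper: releases the HDF5 handles
        end subroutine read_dumpfile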

Type Bound

lMat_hdf5_read_t

Arguments

Type                       Intent          Optional   Attributes   Name
class(lMat_hdf5_read_t)    intent(inout)                           this
class(lMat_t)              intent(inout)                           lMat

Source Code

    subroutine loop_file(this, lMat)
        class(lMat_hdf5_read_t), intent(inout) :: this
        class(lMat_t), intent(inout) :: lMat

        real(dp) :: rVal
        logical :: running, any_running
        integer(hsize_t) :: blocksize, blockstart, blockend, this_blocksize
        integer(hsize_t), allocatable :: indices(:, :), entries(:, :)
        integer(MPIArg) :: ierr
        rVal = 0.0_dp

        ! reserve max. 128MB buffer size for dumpfile I/O
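        ! (2**27 bytes = 128 MiB; each stored element occupies 7 int64 words:
        !  six index entries plus one value word, hence the divisor)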
        blocksize = (2_hsize_t**27) .div. (7 * sizeof(0_int64))
        blockstart = this%offsets(iProcIndex_intra)

        blockend = min(blockstart + blocksize - 1, this%countsEnd)
        any_running = .true.
        running = .true.
        do while (any_running)
            if (running) then
                ! the number of elements to read in this block
                this_blocksize = blockend - blockstart + 1
            else
                this_blocksize = 0
            end if
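            ! Ranks that have exhausted their range keep looping with a zero-sized
            ! block, presumably so that the chunked reads and the allreduce below
            ! remain collective across the intra-node communicator.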

            allocate(indices(6, this_blocksize), source=0_int64)
            allocate(entries(1, this_blocksize), source=0_int64)

            ! read in the data
            call read_2d_multi_chunk( &
                this%ds_vals, entries, h5kind_to_type(dp, H5_REAL_KIND), &
                [1_hsize_t, this_blocksize], &
                [0_hsize_t, blockstart], &
                [0_hsize_t, 0_hsize_t])

            call read_2d_multi_chunk( &
                this%ds_inds, indices, h5kind_to_type(int64, H5_INTEGER_KIND), &
                [6_hsize_t, this_blocksize], &
                [0_hsize_t, blockstart], &
                [0_hsize_t, 0_hsize_t])
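            ! indices(6, :) holds the index tuple of each element and entries(1, :) its
            ! value; the value is read with a real memory type into int64 storage, i.e.
            ! as a raw bit pattern that read_op_hdf5 is expected to reinterpret.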

            ! Do something with the read-in values
            ! If umat is updated, it has to be done using MPI RMA calls, so synchronization is
            ! required
            call umat_fence()
            ! This has to be threadsafe !!!
            call lMat%read_op_hdf5(indices, entries)
            call umat_fence()

            ! the read_op is allowed to deallocate if memory has to be made available
            if (allocated(entries)) deallocate(entries)
            if (allocated(indices)) deallocate(indices)

            ! set the size/offset for the next block
            if (running) then
                blockstart = blockend + 1
                blockend = min(blockstart + blocksize - 1, this%countsEnd)
                if (blockstart > this%countsEnd) running = .false.
            end if

            ! once all procs on this node are done reading, we can exit
            call MPI_ALLREDUCE(running, any_running, 1, MPI_LOGICAL, MPI_LOR, mpi_comm_intra, ierr)
        end do

    contains

        subroutine umat_fence()
            use IntegralsData, only: umat_win, nFrozen

            ! the umat RMA window only needs synchronising when frozen orbitals are
            ! present, since only then (judging by this guard) does read_op_hdf5
            ! update umat
            if (nFrozen > 0) call MPI_Win_fence(0, umat_win, ierr)
        end subroutine umat_fence

    end subroutine loop_file