subroutine real_time_determ_projection()

    ! This subroutine gathers partial_determ_vecs from each processor so that
    ! the full vector for the whole deterministic space is stored on every
    ! processor. It then performs the deterministic multiplication of the
    ! projector on this full vector.
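
    ! With each walker stored as a (real, imaginary) pair in its
    ! min_part_type(run)/max_part_type(run) components, the first loop below
    ! accumulates the real and imaginary parts of the update
    !     delta_psi = -i * (tau_real + i*tau_imag) * H * psi
    ! for the core-space matrix elements held in rep%sparse_core_ham; the
    ! second loop then adds the diagonal reference-energy, shift and damping
    ! terms which are not stored there.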
    use FciMCData, only: SemiStoch_Comms_Time
    use FciMCData, only: SemiStoch_Multiply_Time
    use Parallel_neci, only: MPIBarrier, MPIAllGatherV

    integer :: i, j, ierr, run

    associate(rep => cs_replicas(core_run))

        call MPIBarrier(ierr)

        call set_timer(SemiStoch_Comms_Time)

        call MPIAllGatherV(rep%partial_determ_vecs, rep%full_determ_vecs, &
                           rep%determ_sizes, rep%determ_displs)

        call halt_timer(SemiStoch_Comms_Time)

        call set_timer(SemiStoch_Multiply_Time)
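
        ! The deterministic multiplication is only compiled in for complex
        ! builds (CMPLX_); in a real build this routine only performs the
        ! gather above.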
#ifdef CMPLX_
        if (rep%determ_sizes(iProcIndex) >= 1) then
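
            ! partial_determ_vecs has already been gathered into
            ! full_determ_vecs above, so it can safely be zeroed and reused to
            ! hold the result for this processor's core determinants.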

            ! Perform the multiplication.
            rep%partial_determ_vecs = 0.0_dp
            do i = 1, rep%determ_sizes(iProcIndex)
                do j = 1, rep%sparse_core_ham(i)%num_elements
                    do run = 1, inum_runs
                        ! Real part of the 'spawn'.
                        rep%partial_determ_vecs(min_part_type(run), i) = &
                            rep%partial_determ_vecs(min_part_type(run), i) + &
                            tau_real * Aimag(rep%sparse_core_ham(i)%elements(j)) * rep%full_determ_vecs( &
                                min_part_type(run), rep%sparse_core_ham(i)%positions(j)) + &
                            tau_real * Real(rep%sparse_core_ham(i)%elements(j)) * rep%full_determ_vecs( &
                                max_part_type(run), rep%sparse_core_ham(i)%positions(j)) + &
                            tau_imag * Real(rep%sparse_core_ham(i)%elements(j)) * rep%full_determ_vecs( &
                                min_part_type(run), rep%sparse_core_ham(i)%positions(j)) - &
                            tau_imag * Aimag(rep%sparse_core_ham(i)%elements(j)) * rep%full_determ_vecs( &
                                max_part_type(run), rep%sparse_core_ham(i)%positions(j))

                        ! Imaginary part of the 'spawn'.
                        rep%partial_determ_vecs(max_part_type(run), i) = &
                            rep%partial_determ_vecs(max_part_type(run), i) - &
                            tau_real * Real(rep%sparse_core_ham(i)%elements(j)) * rep%full_determ_vecs( &
                                min_part_type(run), rep%sparse_core_ham(i)%positions(j)) + &
                            tau_real * Aimag(rep%sparse_core_ham(i)%elements(j)) * rep%full_determ_vecs( &
                                max_part_type(run), rep%sparse_core_ham(i)%positions(j)) + &
                            tau_imag * Real(rep%sparse_core_ham(i)%elements(j)) * rep%full_determ_vecs( &
                                max_part_type(run), rep%sparse_core_ham(i)%positions(j)) + &
                            tau_imag * Aimag(rep%sparse_core_ham(i)%elements(j)) * rep%full_determ_vecs( &
                                min_part_type(run), rep%sparse_core_ham(i)%positions(j))
                    end do
                end do
            end do

            ! Now add the diagonal contributions (reference energy, shift and
            ! damping) which are not stored in rep%sparse_core_ham.
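            ! For each core determinant i this adds
            !     ( -i*tau_real*(Hii - gs_energy(run))
            !       + tau_imag*(Hii - gs_energy(run) - DiagSft(run))
            !       + tau_real*real_time_info%damping ) * psi_i,
            ! written out below in its real and imaginary components.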
            do i = 1, rep%determ_sizes(iProcIndex)
                do run = 1, inum_runs
                    ! Real part.
                    rep%partial_determ_vecs(min_part_type(run), i) = &
                        rep%partial_determ_vecs(min_part_type(run), i) + &
                        (Hii - gs_energy(run)) * rep%full_determ_vecs(max_part_type(run), i + &
                            rep%determ_displs(iProcIndex)) * tau_real + &
                        (tau_imag * (Hii - gs_energy(run) - DiagSft(run)) + &
                            tau_real * real_time_info%damping) * rep%full_determ_vecs( &
                            min_part_type(run), i + rep%determ_displs(iProcIndex))

                    ! Imaginary part.
                    rep%partial_determ_vecs(max_part_type(run), i) = &
                        rep%partial_determ_vecs(max_part_type(run), i) + &
                        (tau_imag * (Hii - gs_energy(run) - DiagSft(run)) + &
                            tau_real * real_time_info%damping) * rep%full_determ_vecs( &
                            max_part_type(run), i + rep%determ_displs(iProcIndex)) - &
                        tau_real * (Hii - gs_energy(run)) * rep%full_determ_vecs( &
                            min_part_type(run), i + rep%determ_displs(iProcIndex))
                end do
            end do
        end if
#endif

        call halt_timer(SemiStoch_Multiply_Time)

    end associate

end subroutine real_time_determ_projection