@@ -67,12 +67,94 @@ void ModuleIO::write_eig_iter(const ModuleBase::matrix &ekb,const ModuleBase::ma
                     GlobalV::ofs_running << std::endl;
                 }
             }
+
+
+            // =============================================================
+            // MPI communication:
+            // RANK 0 collects and prints out EIGENVALUES and OCCUPATIONS for all k-points
+            // =============================================================
+#ifdef __MPI
+            MPI_Barrier(MPI_COMM_WORLD);
+
+            if (kpar > 1 && ip > 0 && PARAM.inp.out_alllog == 0)
+            {
+
+                if (GlobalV::MY_RANK == 0)
+                {
+
+                    // for the current spin channel [is] and pool [ip],
+                    // MPI_Recv the matrix sizes, ik2iktot, ekb and wg from the pool's RANK_IN_POOL=0 process
+                    MPI_Status status;
+                    int recv_nks_np, recv_nbands;
+                    MPI_Recv(&recv_nks_np, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+                    int source_rank = status.MPI_SOURCE;
+                    MPI_Recv(&recv_nbands, 1, MPI_INT, source_rank, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+                    int* recv_ik2iktot = new int[recv_nks_np];
+                    MPI_Recv(recv_ik2iktot, recv_nks_np, MPI_INT, source_rank, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+                    ModuleBase::matrix recv_ekb(recv_nks_np, recv_nbands);
+                    MPI_Recv(recv_ekb.c, recv_nks_np * recv_nbands, MPI_DOUBLE, source_rank, 3, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+                    ModuleBase::matrix recv_wg(recv_nks_np, recv_nbands);
+                    MPI_Recv(recv_wg.c, recv_nks_np * recv_nbands, MPI_DOUBLE, source_rank, 4, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+
+                    // print EIGENVALUES and OCCUPATIONS of received k-points
+                    for (int ik = 0; ik < recv_nks_np; ++ik)
+                    {
+                        GlobalV::ofs_running << " spin=" << is + 1 << " k-point="
+                                             << recv_ik2iktot[ik] + 1 - is * nkstot_np << std::endl;
+
+                        GlobalV::ofs_running << std::setw(8) << "Index"
+                                             << std::setw(18) << "Eigenvalues(eV)"
+                                             << std::setw(18) << "Occupations" << std::endl;
+
+                        for (int ib = 0; ib < recv_nbands; ib++)
+                        {
+                            GlobalV::ofs_running << std::setw(8) << ib + 1
+                                                 << std::setw(18) << recv_ekb(ik, ib) * ModuleBase::Ry_to_eV
+                                                 << std::setw(18) << recv_wg(ik, ib) << std::endl;
+                        }
+                        GlobalV::ofs_running << std::endl;
+                    }
+
+                    delete[] recv_ik2iktot;
+                }
+                else if (GlobalV::MY_POOL == ip && GlobalV::RANK_IN_POOL == 0 && GlobalV::MY_BNDGROUP == 0)
+                {
+                    // for the current spin channel [is] and pool [ip],
+                    // MPI_Send the matrix sizes, ik2iktot, ekb and wg to RANK=0
+                    const int send_nks_np = nks_np;
+                    const int send_nbands = ekb.nc;
+                    const int is_offset = is * nks_np;
+                    int* send_ik2iktot = new int[send_nks_np];
+                    for (int ik = 0; ik < send_nks_np; ++ik)
+                    {
+                        send_ik2iktot[ik] = kv.ik2iktot[is_offset + ik];
+                    }
+                    MPI_Send(&send_nks_np, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
+                    MPI_Send(&send_nbands, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
+                    MPI_Send(send_ik2iktot, send_nks_np, MPI_INT, 0, 2, MPI_COMM_WORLD);
+                    MPI_Send(ekb.c + is_offset * send_nbands,
+                             send_nks_np * send_nbands,
+                             MPI_DOUBLE, 0, 3, MPI_COMM_WORLD);
+                    MPI_Send(wg.c + is_offset * send_nbands,
+                             send_nks_np * send_nbands,
+                             MPI_DOUBLE, 0, 4, MPI_COMM_WORLD);
+
+                    delete[] send_ik2iktot;
+                }
+
+                MPI_Barrier(MPI_COMM_WORLD);
+            }
+            // =============================================================
+            // End of MPI communication
+            // =============================================================
+#endif
         }
 #ifdef __MPI
         MPI_Barrier(MPI_COMM_WORLD);
 #endif
     }
 
+
     ModuleBase::timer::tick("ModuleIO", "write_eig_iter");
 }
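
For reference, a minimal, self-contained sketch (not part of the patch) of the point-to-point pattern the receive branch relies on: rank 0 posts its first receive with MPI_ANY_SOURCE, reads status.MPI_SOURCE to learn which rank answered, and then directs the remaining tagged receives at that same rank, so one sender's messages are never interleaved with another's. The rank count, tags, and payload sizes below are arbitrary illustration values.

```cpp
// Sketch only: size message on tag 0 with MPI_ANY_SOURCE, payload on tag 1
// pinned to the sender reported in status.MPI_SOURCE.
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank = 0, size = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (rank == 0)
    {
        // collect one (size, payload) pair from every other rank, in whatever order they report
        for (int n = 1; n < size; ++n)
        {
            MPI_Status status;
            int count = 0;
            MPI_Recv(&count, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
            const int src = status.MPI_SOURCE; // lock onto this sender for the rest of its messages
            std::vector<double> payload(count);
            MPI_Recv(payload.data(), count, MPI_DOUBLE, src, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            std::printf("rank 0 received %d doubles from rank %d\n", count, src);
        }
    }
    else
    {
        const int count = rank + 1; // arbitrary per-rank payload size
        std::vector<double> payload(count, static_cast<double>(rank));
        MPI_Send(&count, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Send(payload.data(), count, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD);
    }

    MPI_Finalize();
    return 0;
}
```

In the patch itself only one pool root sends per (spin, pool) iteration; MPI_ANY_SOURCE appears to be used because rank 0 does not know that root's world rank in advance, not because many senders race at once.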
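A note on the payload arguments: the patch passes ekb.c + is_offset * send_nbands (and likewise wg.c + is_offset * send_nbands) directly to MPI_Send, which relies on the matrix storing its entries row by row, so the nks_np rows of spin channel is form one contiguous run of nks_np * send_nbands doubles. Below is a small sketch of that offset arithmetic, using a hypothetical RowMajorMatrix stand-in rather than the real ModuleBase::matrix.

```cpp
// Sketch only: row-major storage makes a per-spin block of rows contiguous,
// so a single pointer offset addresses the whole block.
#include <cassert>
#include <cstddef>
#include <vector>

// hypothetical stand-in for a row-major matrix exposing a raw data pointer `c`
struct RowMajorMatrix
{
    int nr = 0, nc = 0;
    std::vector<double> data;
    double* c = nullptr;
    RowMajorMatrix(int rows, int cols)
        : nr(rows), nc(cols), data(static_cast<std::size_t>(rows) * cols, 0.0), c(data.data()) {}
    double& operator()(int i, int j) { return c[static_cast<std::size_t>(i) * nc + j]; }
};

int main()
{
    const int nspin = 2, nks_np = 3, nbands = 4;
    RowMajorMatrix ekb(nspin * nks_np, nbands); // rows: [spin-0 k-points | spin-1 k-points]

    const int is = 1;
    const int is_offset = is * nks_np;                                // first row of the spin-1 block
    double* block = ekb.c + static_cast<std::size_t>(is_offset) * nbands;
    const int block_len = nks_np * nbands;                            // what the patch passes as the send count

    ekb(is_offset, 0) = 42.0; // write through the 2-D accessor...
    assert(block[0] == 42.0); // ...and the offset pointer sees the same element
    return block_len > 0 ? 0 : 1;
}
```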