From c2e3791698f38824e9076cb62347e9c90263866c Mon Sep 17 00:00:00 2001 From: Mika Malinen Date: Tue, 14 Oct 2025 16:35:02 +0300 Subject: [PATCH 01/22] A fix related to the BodyId of a boundary element --- fem/src/ElementDescription.F90 | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fem/src/ElementDescription.F90 b/fem/src/ElementDescription.F90 index ff1b70528d..06e406ac13 100644 --- a/fem/src/ElementDescription.F90 +++ b/fem/src/ElementDescription.F90 @@ -11139,8 +11139,10 @@ FUNCTION mGetElementDOFs( Indexes, UElement, USolver, NotDG, UMesh ) RESULT(nd) IF ( ASSOCIATED(Element % BoundaryInfo % Left) ) & id = Element % BoundaryInfo % Left % BodyId - IF ( ASSOCIATED(Element % BoundaryInfo % Right) ) & - id = Element % BoundaryInfo % Right % BodyId + IF (id == 0 .OR. id > CurrentModel % NumberOfBodies) THEN + IF ( ASSOCIATED(Element % BoundaryInfo % Right) ) & + id = Element % BoundaryInfo % Right % BodyId + END IF END IF ! ! In some cases it may happen that this function From 6cf67f0b0f4a1ad4e87f05420f91ce1adbf2448c Mon Sep 17 00:00:00 2001 From: Mika Malinen Date: Wed, 15 Oct 2025 12:49:49 +0300 Subject: [PATCH 02/22] Revise some checks related to BoundaryInfo % Right/Left --- fem/src/ElementDescription.F90 | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/fem/src/ElementDescription.F90 b/fem/src/ElementDescription.F90 index 06e406ac13..ad6af49146 100644 --- a/fem/src/ElementDescription.F90 +++ b/fem/src/ElementDescription.F90 @@ -11209,13 +11209,16 @@ FUNCTION mGetElementDOFs( Indexes, UElement, USolver, NotDG, UMesh ) RESULT(nd) DO j=1,Element % TYPE % NumberOfFaces Face => Mesh % Faces(Element % FaceIndexes(j)) face_type = Face % TYPE % ElementCode/100 + k = 0 IF (ASSOCIATED(Face % BoundaryInfo % Left)) THEN face_id = Face % BoundaryInfo % Left % BodyId k = MAX(0,Solver % Def_Dofs(face_type+6,face_id,5)) END IF - IF (ASSOCIATED(Face % BoundaryInfo % Right)) THEN - face_id = Face % BoundaryInfo % Right % BodyId - k = MAX(k,Solver % Def_Dofs(face_type+6,face_id,5)) + IF (k == 0) THEN + IF (ASSOCIATED(Face % BoundaryInfo % Right)) THEN + face_id = Face % BoundaryInfo % Right % BodyId + k = MAX(k,Solver % Def_Dofs(face_type+6,face_id,5)) + END IF END IF IF (k > 0) THEN NeedEdges = .TRUE. 
@@ -11293,9 +11296,11 @@ FUNCTION mGetElementDOFs( Indexes, UElement, USolver, NotDG, UMesh ) RESULT(nd) face_id = Face % BoundaryInfo % Left % BodyId k = MAX(0,Solver % Def_Dofs(face_type+6,face_id,5)) END IF - IF (ASSOCIATED(Face % BoundaryInfo % Right)) THEN - face_id = Face % BoundaryInfo % Right % BodyId - k = MAX(k,Solver % Def_Dofs(face_type+6,face_id,5)) + IF (k == 0) THEN + IF (ASSOCIATED(Face % BoundaryInfo % Right)) THEN + face_id = Face % BoundaryInfo % Right % BodyId + k = MAX(k,Solver % Def_Dofs(face_type+6,face_id,5)) + END IF END IF END IF @@ -11421,9 +11426,11 @@ FUNCTION mGetElementDOFs( Indexes, UElement, USolver, NotDG, UMesh ) RESULT(nd) face_id = Face % BoundaryInfo % Left % BodyId k = MAX(0,Solver % Def_Dofs(face_type+6,face_id,5)) END IF - IF (ASSOCIATED(Face % BoundaryInfo % Right)) THEN - face_id = Face % BoundaryInfo % Right % BodyId - k = MAX(k,Solver % Def_Dofs(face_type+6,face_id,5)) + IF (k == 0) THEN + IF (ASSOCIATED(Face % BoundaryInfo % Right)) THEN + face_id = Face % BoundaryInfo % Right % BodyId + k = MAX(k,Solver % Def_Dofs(face_type+6,face_id,5)) + END IF END IF END IF From f39095dc5074662484b979c324cf370d8d27f090 Mon Sep 17 00:00:00 2001 From: Mika Malinen Date: Wed, 15 Oct 2025 14:01:54 +0300 Subject: [PATCH 03/22] Update some comments --- fem/tests/p-FEM_with_varying_p/README.txt | 4 ++-- fem/tests/p-FEM_with_varying_p/cylindrical_shell.sif | 8 +++----- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/fem/tests/p-FEM_with_varying_p/README.txt b/fem/tests/p-FEM_with_varying_p/README.txt index 7c04340ea7..14dffb5e5a 100644 --- a/fem/tests/p-FEM_with_varying_p/README.txt +++ b/fem/tests/p-FEM_with_varying_p/README.txt @@ -28,8 +28,8 @@ The degree of approximation is defined by using a special keyword construct where the part "a_matc_function" is the name of a MATC function of a 4-tuple. The first argument is the identifier of the body while the remaining components -are the coordinates of the element mid-point. At the time of writing, this -seems to be the only functional way to get varying p within a single model. +are the coordinates of the element mid-point. For another way to define a body- +dependent element definition see the test p-FEM_two_solvers. The energy norm of the error for different p-element definitions (over the same 2 X 2 mesh, with p being fixed as p=8 in the boundary layer and p varying diff --git a/fem/tests/p-FEM_with_varying_p/cylindrical_shell.sif b/fem/tests/p-FEM_with_varying_p/cylindrical_shell.sif index d12cfd15a9..604e4b6f60 100644 --- a/fem/tests/p-FEM_with_varying_p/cylindrical_shell.sif +++ b/fem/tests/p-FEM_with_varying_p/cylindrical_shell.sif @@ -25,10 +25,8 @@ ! ! where the part "a_matc_function" is the name of a MATC function of a 4-tuple. ! The first argument is the identifier of the body while the remaining components -! are the coordinates of the element mid-point. At the time of writing, this -! seems to be the only functional way to get varying p within a single model. -! In this case the MATC-dependent element definition should be placed into -! Equation section. +! are the coordinates of the element mid-point. For another way to define a body- +! dependent element definition see the test p-FEM_two_solvers. ! ! The energy norm of the error for different p-element definitions (over the ! 
same 2 X 2 mesh, with p being fixed as p=8 in the boundary layer and p varying @@ -102,7 +100,6 @@ End Equation 1 Active Solvers(1) = 1 - Element = "p:%bodywisep" End Solver 1 @@ -120,6 +117,7 @@ Solver 1 Linear System GCR Restart = 100 Linear System Abort Not Converged = False Steady State Convergence Tolerance = 1e-09 + Element = "p:%bodywisep" End Solver 2 From 4a1b729862bb3fb57c3a25dc3a020c0f527e90b7 Mon Sep 17 00:00:00 2001 From: Thomas Zwinger Date: Wed, 15 Oct 2025 14:57:07 +0300 Subject: [PATCH 04/22] Delete fem/src/elmer_grid.h --- fem/src/elmer_grid.h | 43 ------------------------------------------- 1 file changed, 43 deletions(-) delete mode 100644 fem/src/elmer_grid.h diff --git a/fem/src/elmer_grid.h b/fem/src/elmer_grid.h deleted file mode 100644 index e3ead3b949..0000000000 --- a/fem/src/elmer_grid.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - Elmer, A Finite Element Software for Multiphysical Problems - - Copyright 1st April 1995 - , CSC - IT Center for Science Ltd., Finland - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library (in file ../../LGPL-2.1); if not, write - to the Free Software Foundation, Inc., 51 Franklin Street, - Fifth Floor, Boston, MA 02110-1301 USA -*/ - -/* Authors: Moritz Hanke (DKRZ), adapted by Thomas Zwinger - original code provided by Moritz Hanke (DKRZ) in the frame of the TerraDT project - Email: thomas.zwinger@csc.fi - Web: http://www.csc.fi/elmer - Address: CSC - IT Center for Science Ltd. - Keilaranta 14 - 02101 Espoo, Finland - - Original Date: 10.6.2025 -*/ - -#ifndef ELMER_GRID_H -#define ELMER_GRID_H - -void read_grid( - char const * grid_dir, int rank, int size, int num_parts, - int * nbr_vertices, int * nbr_cells, int ** num_vertices_per_cell, - double ** x_vertices, double ** y_vertices, - double ** x_cells, double ** y_cells, - int ** cell_ids, int ** vertex_ids, int ** cell_to_vertex); - -#endif // ELMER_GRID_H From e5927a690a02ef9ca3124511d36b612d7b2bac2d Mon Sep 17 00:00:00 2001 From: Thomas Zwinger Date: Wed, 15 Oct 2025 14:57:24 +0300 Subject: [PATCH 05/22] Delete fem/src/elmer_grid.c --- fem/src/elmer_grid.c | 312 ------------------------------------------- 1 file changed, 312 deletions(-) delete mode 100644 fem/src/elmer_grid.c diff --git a/fem/src/elmer_grid.c b/fem/src/elmer_grid.c deleted file mode 100644 index 2eae42aef6..0000000000 --- a/fem/src/elmer_grid.c +++ /dev/null @@ -1,312 +0,0 @@ -/* - Elmer, A Finite Element Software for Multiphysical Problems - - Copyright 1st April 1995 - , CSC - IT Center for Science Ltd., Finland - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library (in file ../../LGPL-2.1); if not, write - to the Free Software Foundation, Inc., 51 Franklin Street, - Fifth Floor, Boston, MA 02110-1301 USA -*/ - -/* Authors: Moritz Hanke (DKRZ), adapted by Thomas Zwinger - original code provided by Moritz Hanke (DKRZ) in the frame of the TerraDT project - Email: thomas.zwinger@csc.fi - Web: http://www.csc.fi/elmer - Address: CSC - IT Center for Science Ltd. - Keilaranta 14 - 02101 Espoo, Finland - - Original Date: 10.6.2025 -*/ -#include -#include -#include -#include -#include - -#include - -#include "elmer_grid.h" - -struct glb2loc { - int global_id, local_id; -}; - -static void compute_cell_centers( - int nbr_cells, int * cell_to_vertex, int * num_vertices_per_cell, - double * x_vertices, double * y_vertices, - double * x_cells, double * y_cells); - -static int compare_glb2loc_glb (const void * a, const void * b) { - return ((struct glb2loc*)a)->global_id - ((struct glb2loc*)b)->global_id; -} - -static void convert2rad( - double * x_vertices, double * y_vertices, int nbr_vertices) { - - // define transformation - PJ * P = - proj_create_crs_to_crs( - PJ_DEFAULT_CTX, "EPSG:3413", "+proj=longlat +datum=WGS84", NULL); - - if (!P) { - fputs("failed to create transformation", stderr); - MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); - } - - // transform all vertices - for (int i = 0; i < nbr_vertices; ++i) { - PJ_COORD src_coord = proj_coord(x_vertices[i], y_vertices[i], 0, 0); - PJ_COORD tgt_coord = proj_trans(P, PJ_FWD, src_coord); - x_vertices[i] = proj_torad(tgt_coord.lp.lam); - y_vertices[i] = proj_torad(tgt_coord.lp.phi); - } - - // clean up - proj_destroy(P); -} - -void read_grid( - char const * grid_dir, int rank, int size, int num_parts, - int * nbr_vertices, int * nbr_cells, int ** num_vertices_per_cell, - double ** x_vertices, double ** y_vertices, - double ** x_cells, double ** y_cells, - int ** cell_ids, int ** vertex_ids, int ** cell_to_vertex) { - - enum {NUM_VERT_PER_CELL = 3}; - - *nbr_vertices = 0; - *nbr_cells = 0; - *num_vertices_per_cell = NULL; - *x_vertices = NULL; - *y_vertices = NULL; - *cell_ids = NULL; - *vertex_ids = NULL; - *cell_to_vertex = NULL; - - if (!grid_dir) { - - fputs("invalid grid_dir\n", stderr); - MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); - } - - // distribute grid partitions across available ranks - int start_part_idx = - ((long long)num_parts * (long long)rank) / (long long)size; - int next_start_part_idx = - ((long long)num_parts * (long long)(rank+1)) / (long long)size; - - struct glb2loc * glb2loc_vert = NULL; - struct glb2loc * glb2loc_cell_vert = NULL; - - size_t grid_dir_len = strlen(grid_dir); - char header_filename[grid_dir_len + 64]; - char nodes_filename[grid_dir_len + 64]; - char elements_filename[grid_dir_len + 64]; - - // for all grid partitions of the local rank - for (int part_idx = start_part_idx; part_idx < next_start_part_idx; - ++part_idx) { - - // open files of current grid partition - - sprintf(header_filename, "%s/partitioning.%d/part.%d.header", grid_dir, num_parts, part_idx+1); - FILE * header_file = fopen(header_filename, "r"); - sprintf(nodes_filename, "%s/partitioning.%d/part.%d.nodes", grid_dir, num_parts, part_idx+1); - FILE * nodes_file = fopen(nodes_filename, "r"); - sprintf(elements_filename, "%s/partitioning.%d/part.%d.elements", grid_dir, num_parts, part_idx+1); - FILE * elements_file = fopen(elements_filename, "r"); - - if 
(!header_file || !nodes_file || !elements_file) { - fprintf( - stderr,"could not open grid files\n" - "header_file: %s %p\n" - "nodes_file: %s %p\n" - "elements_file: %s %p\n", - header_filename, header_file, - nodes_filename, nodes_file, - elements_filename, elements_file); - MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); - } - - // read header - int part_nbr_vertices, part_nbr_cells, part_nbr_edges; - fscanf( - header_file, "%d%d%d", - &part_nbr_vertices, &part_nbr_cells, &part_nbr_edges); - - // allocate arrays for nodes - *x_vertices = - realloc( - *x_vertices, - (size_t)(*nbr_vertices + part_nbr_vertices) * sizeof(**x_vertices)); - *y_vertices = - realloc( - *y_vertices, - (size_t)(*nbr_vertices + part_nbr_vertices) * sizeof(**y_vertices)); - *vertex_ids = - realloc( - *vertex_ids, - (size_t)(*nbr_vertices + part_nbr_vertices) * sizeof(**vertex_ids)); - glb2loc_vert = - realloc( - glb2loc_vert, - (size_t)(*nbr_vertices + part_nbr_vertices) * sizeof(*glb2loc_vert)); - - // read node data - for (int i = 0, j = *nbr_vertices; i < part_nbr_vertices; ++i, ++j) { - int dummy; - double z_vertices; - fscanf( - nodes_file, "%d%d%lf%lf%lf\n", - (*vertex_ids) + j, &dummy, - (*x_vertices) + j, *y_vertices + j, &z_vertices); - glb2loc_vert[j].global_id = (*vertex_ids)[j]; - glb2loc_vert[j].local_id = j; - } - *nbr_vertices += part_nbr_vertices; - - // allocate arrays for elements - *num_vertices_per_cell = - realloc( - *num_vertices_per_cell, - (size_t)(*nbr_cells + part_nbr_cells) * sizeof(**num_vertices_per_cell)); - *cell_ids = - realloc( - *cell_ids, - (size_t)(*nbr_cells + part_nbr_cells) * sizeof(**cell_ids)); - *cell_to_vertex = - realloc( - *cell_to_vertex, - (size_t)((*nbr_cells + part_nbr_cells) * NUM_VERT_PER_CELL) * - sizeof(**cell_to_vertex)); - glb2loc_cell_vert = - realloc( - glb2loc_cell_vert, - (size_t)((*nbr_cells + part_nbr_cells) * NUM_VERT_PER_CELL) * - sizeof(*glb2loc_cell_vert)); - - // read element data - for (int i = 0, j = *nbr_cells; i < part_nbr_cells; ++i, ++j) { - (*num_vertices_per_cell)[j] = NUM_VERT_PER_CELL; - int dummy_a, dummy_b; - fscanf( - elements_file, "%d%d%d%d%d%d\n", - (*cell_ids) + j, &dummy_a, &dummy_b, - &(glb2loc_cell_vert[3 * j + 0].global_id), - &(glb2loc_cell_vert[3 * j + 1].global_id), - &(glb2loc_cell_vert[3 * j + 2].global_id)); - glb2loc_cell_vert[3 * j + 0].local_id = 3 * j + 0; - glb2loc_cell_vert[3 * j + 1].local_id = 3 * j + 1; - glb2loc_cell_vert[3 * j + 2].local_id = 3 * j + 2; - } - *nbr_cells += part_nbr_cells; - - // close files - fclose(elements_file); - fclose(nodes_file); - fclose(header_file); - } - - // convert coordiantes to radian - convert2rad(*x_vertices, *y_vertices, *nbr_vertices); - - // sort global to local lookup by global ids - qsort( - glb2loc_vert, (size_t)*nbr_vertices, sizeof(*glb2loc_vert), - compare_glb2loc_glb); - qsort( - glb2loc_cell_vert, NUM_VERT_PER_CELL * (size_t)*nbr_cells, - sizeof(*glb2loc_cell_vert), - compare_glb2loc_glb); - - // for all cell vertices -> map global cell vertices to local ones - for (size_t i = 0, j = 0; i < NUM_VERT_PER_CELL * *nbr_cells; ++i) { - - while ((j < *nbr_vertices) && - (glb2loc_cell_vert[i].global_id > glb2loc_vert[j].global_id)) ++j; - - if ((j >= *nbr_vertices) || - (glb2loc_cell_vert[i].global_id != glb2loc_vert[j].global_id)) { - - fputs("could not match cell vertices to list of vertices\n", stderr); - MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); - } - - (*cell_to_vertex)[glb2loc_cell_vert[i].local_id] = glb2loc_vert[j].local_id; - } - - free(glb2loc_cell_vert); - 
free(glb2loc_vert); - - // compute cell centers from vertex coordiantes - *x_cells = malloc(*nbr_cells * sizeof(**x_cells)); - *y_cells = malloc(*nbr_cells * sizeof(**y_cells)); - compute_cell_centers( - *nbr_cells, *cell_to_vertex, *num_vertices_per_cell, - *x_vertices, *y_vertices, *x_cells, *y_cells); -} - -static inline void LLtoXYZ(double lon, double lat, double p_out[]) { - - while (lon < -M_PI) lon += 2.0 * M_PI; - while (lon >= M_PI) lon -= 2.0 * M_PI; - - double cos_lat = cos(lat); - p_out[0] = cos_lat * cos(lon); - p_out[1] = cos_lat * sin(lon); - p_out[2] = sin(lat); -} - -static inline void XYZtoLL (double const p_in[], double * lon, double * lat) { - - *lon = atan2(p_in[1] , p_in[0]); - *lat = M_PI_2 - acos(p_in[2]); -} - -static inline void normalise_vector(double v[]) { - - double norm = 1.0 / sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2]); - - v[0] *= norm; - v[1] *= norm; - v[2] *= norm; -} - -static void compute_cell_centers( - int nbr_cells, int * cell_to_vertex, int * num_vertices_per_cell, - double * x_vertices, double * y_vertices, - double * x_cells, double * y_cells) { - - for (int i = 0; i < nbr_cells; ++i) { - - int num_vertices = num_vertices_per_cell[i]; - double cell_center[3] = {0.0, 0.0, 0.0}; - - for (int j = 0; j < num_vertices; ++j) { - double vertex[3]; - LLtoXYZ( - x_vertices[cell_to_vertex[j]], y_vertices[cell_to_vertex[j]], vertex); - cell_center[0] += vertex[0]; - cell_center[1] += vertex[1]; - cell_center[2] += vertex[2]; - } - - normalise_vector(cell_center); - XYZtoLL(cell_center, x_cells + i, y_cells + i); - - cell_to_vertex += num_vertices; - } -} - From eb4b3c120a9d7b66a7c9da66859639dd637c7aa7 Mon Sep 17 00:00:00 2001 From: tzwinger Date: Wed, 15 Oct 2025 16:34:06 +0300 Subject: [PATCH 06/22] removed elmer_grid.c from file list in fem/src/CMakeLists.txt --- fem/src/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fem/src/CMakeLists.txt b/fem/src/CMakeLists.txt index d5c8c1ae29..c992bee3e8 100644 --- a/fem/src/CMakeLists.txt +++ b/fem/src/CMakeLists.txt @@ -55,7 +55,7 @@ ELSE() ENDIF() IF(WITH_YAC) - LIST(APPEND solverlib_SOURCES elmer_grid.c elmer_coupling.F90) + LIST(APPEND solverlib_SOURCES elmer_coupling.F90) ENDIF() From a208f31ad734c7ce115c10d690817a010cec7c68 Mon Sep 17 00:00:00 2001 From: tzwinger Date: Wed, 15 Oct 2025 16:44:56 +0300 Subject: [PATCH 07/22] removed dummy grid reader read_grid_c --- fem/src/elmer_coupling.F90 | 34 +++++----------------------------- 1 file changed, 5 insertions(+), 29 deletions(-) diff --git a/fem/src/elmer_coupling.F90 b/fem/src/elmer_coupling.F90 index d358deb442..35a7a906bb 100644 --- a/fem/src/elmer_coupling.F90 +++ b/fem/src/elmer_coupling.F90 @@ -398,30 +398,6 @@ SUBROUTINE coupling_setup(grid_dir, num_parts, timestepstring) INTERFACE - SUBROUTINE read_grid_c(grid_dir, rank, size, num_parts, & - nbr_vertices, nbr_cells, num_vertices_per_cell, & - x_vertices, y_vertices, x_cells, y_cells, & - cell_ids, vertex_ids, cell_to_vertex) & - bind ( C, name='read_grid' ) - - USE, INTRINSIC :: iso_c_binding - - CHARACTER(KIND=C_CHAR) :: grid_dir(*) - INTEGER(KIND=C_INT), VALUE :: rank - INTEGER(KIND=C_INT), VALUE :: size - INTEGER(KIND=C_INT), VALUE :: num_parts - INTEGER(KIND=C_INT) :: nbr_vertices - INTEGER(KIND=C_INT) :: nbr_cells - TYPE(C_PTR) :: num_vertices_per_cell ! int ** - TYPE(C_PTR) :: x_vertices ! double ** - TYPE(C_PTR) :: y_vertices ! double ** - TYPE(C_PTR) :: x_cells ! double ** - TYPE(C_PTR) :: y_cells ! double ** - TYPE(C_PTR) :: cell_ids ! 
int **
-    TYPE(C_PTR) :: vertex_ids            ! int **
-    TYPE(C_PTR) :: cell_to_vertex        ! int **
-
-  END SUBROUTINE read_grid_c
 
   SUBROUTINE free_c ( ptr ) bind ( c, NAME='free' )
@@ -440,11 +416,11 @@ END SUBROUTINE free_c
   ! * each process only reads in its local part of the grid
   ! * in Elmer this information probably already available and does not have
   !   to be read from file
-  CALL read_grid_c( &
-    TRIM(grid_dir) // c_null_char, comm_rank, comm_size, num_parts, &
-    nbr_vertices, nbr_cells, num_vertices_per_cell_c_ptr, &
-    x_vertices_c_ptr, y_vertices_c_ptr, x_cells_c_ptr, y_cells_c_ptr, &
-    cell_ids_c_ptr, vertex_ids_c_ptr, cell_to_vertex_c_ptr)
+  !CALL read_grid_c( &
+  !  TRIM(grid_dir) // c_null_char, comm_rank, comm_size, num_parts, &
+  !  nbr_vertices, nbr_cells, num_vertices_per_cell_c_ptr, &
+  !  x_vertices_c_ptr, y_vertices_c_ptr, x_cells_c_ptr, y_cells_c_ptr, &
+  !  cell_ids_c_ptr, vertex_ids_c_ptr, cell_to_vertex_c_ptr)
 
   CALL C_F_POINTER( &
     num_vertices_per_cell_c_ptr, num_vertices_per_cell_c, shape=[nbr_cells])

From 5ac0c80f97ba171e83f9c367ae642a0eae699af7 Mon Sep 17 00:00:00 2001
From: tzwinger
Date: Thu, 16 Oct 2025 11:37:40 +0300
Subject: [PATCH 08/22] changed order of ElmerIce config in main CMakeLists.txt
 in order to enable tests that trigger on HAVE_UMFPACK

---
 CMakeLists.txt | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4cba213b5d..8de5fb20bd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -635,13 +635,6 @@ ENDIF()
 
 ADD_SUBDIRECTORY(matc)
 
-IF(WITH_ElmerIce)
-  IF(NOT WITH_MPI)
-    MESSAGE(FATAL_ERROR "Building ElmerIce requires MPI.")
-  ENDIF()
-  MESSAGE(STATUS "Adding optional package ElmerIce")
-  ADD_SUBDIRECTORY(elmerice)
-ENDIF(WITH_ElmerIce)
 
 IF (WITH_UMFPACK)
   IF (EXTERNAL_UMFPACK)
@@ -678,6 +671,7 @@ IF(NOT DEFINED UMFPACK_LONG_FORTRAN_TYPE)
   SET(UMFPACK_LONG_FORTRAN_TYPE "C_LONG")
 ENDIF()
 
+
 # Find external arpack and parpack
 IF(EXTERNAL_ARPACK)
   MESSAGE(STATUS "Using external arpack library instead of built-in")
@@ -700,6 +694,15 @@ ADD_SUBDIRECTORY(mathlibs)
 ADD_SUBDIRECTORY(elmergrid)
 ADD_SUBDIRECTORY(license_texts)
 
+IF(WITH_ElmerIce)
+  IF(NOT WITH_MPI)
+    MESSAGE(FATAL_ERROR "Building ElmerIce requires MPI.")
+  ENDIF()
+  MESSAGE(STATUS "Adding optional package ElmerIce")
+  ADD_SUBDIRECTORY(elmerice)
+ENDIF(WITH_ElmerIce)
+
+
 IF(WITH_ELMERGUI)
   MESSAGE(STATUS " Building ElmerGUI")
   MESSAGE(STATUS "------------------------------------------------")

From c74c393d07baeccb3c0ab2643429ac89792066ec Mon Sep 17 00:00:00 2001
From: tzwinger
Date: Thu, 16 Oct 2025 14:37:41 +0300
Subject: [PATCH 09/22] removed default No Matrix in Flotation.F90

---
 elmerice/Solvers/Flotation.F90 | 2 --
 1 file changed, 2 deletions(-)

diff --git a/elmerice/Solvers/Flotation.F90 b/elmerice/Solvers/Flotation.F90
index f57a0241b5..564f8bdeb3 100644
--- a/elmerice/Solvers/Flotation.F90
+++ b/elmerice/Solvers/Flotation.F90
@@ -67,8 +67,6 @@ SUBROUTINE Flotation_init( Model,Solver,dt,TransientSimulation )
 
   SolverParams => Solver % Values
 
-  CALL ListAddLogical( SolverParams,'No Matrix',.TRUE.)
-
   ZbName = GetString(SolverParams, 'Bottom Surface Name', GotIt)
   IF (.NOT.GotIt) THEN
     CALL INFO(SolverName, 'Bottom Surface Name not found - using default Zb', level=3)

From 93b140edac29bc78b07038943dd8949740750a83 Mon Sep 17 00:00:00 2001
From: Benjamin Rodenberg
Date: Thu, 16 Oct 2025 15:30:53 +0200
Subject: [PATCH 10/22] Refactoring for YAC coupling w.r.t. comm split

* Simplify detection of MPI handshake feature in XIOS.
* Simplify Coloring.
* Tidy preprocessor statements. --- CMakeLists.txt | 19 +++ cmake/Modules/FindYAC.cmake | 2 - cmake/Modules/testMPIHandshake.cmake | 28 +++++ fem/src/CMakeLists.txt | 6 - fem/src/Messages.F90 | 10 +- fem/src/SParIterComm.F90 | 162 +++++++++++++++---------- fem/src/elmer_coupling.F90 | 11 +- fem/src/mo_mpi_handshake.F90 | 42 ------- fem/src/mo_mpi_handshake_stub.F90 | 19 --- fem/src/mpi_handshake.c | 172 --------------------------- fem/src/mpi_handshake.h | 19 --- 11 files changed, 158 insertions(+), 332 deletions(-) create mode 100644 cmake/Modules/testMPIHandshake.cmake delete mode 100644 fem/src/mo_mpi_handshake.F90 delete mode 100644 fem/src/mo_mpi_handshake_stub.F90 delete mode 100644 fem/src/mpi_handshake.c delete mode 100644 fem/src/mpi_handshake.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 654754d44e..a7d53e5263 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -510,6 +510,15 @@ IF (WITH_XIOS) INCLUDE_DIRECTORIES(${XIOS_INCLUDE_DIR}) LINK_DIRECTORIES(${XIOS_LIBRARIES}) ADD_DEFINITIONS(-DHAVE_XIOS) + + INCLUDE(testMPIHandshake) + + MESSAGE(STATUS " XIOS_HAS_MPI_HANDSHAKE: " "${XIOS_HAS_MPI_HANDSHAKE}") + + IF(XIOS_HAS_MPI_HANDSHAKE) + ADD_DEFINITIONS(-DXIOS_HAS_MPI_HANDSHAKE) + ENDIF(XIOS_HAS_MPI_HANDSHAKE) + ELSE(XIOS_FOUND) MESSAGE(STATUS " ${XIOS_FAILMSG}") ENDIF(XIOS_FOUND) @@ -536,6 +545,16 @@ IF (WITH_YAC) "with -DWITH_NETCDF=ON and make sure NetCDF is found.") ENDIF() + MESSAGE(STATUS " XIOS_HAS_MPI_HANDSHAKE: " "${XIOS_HAS_MPI_HANDSHAKE}") + + IF(WITH_XIOS) + IF(NOT XIOS_HAS_MPI_HANDSHAKE) + MESSAGE(FATAL_ERROR "Building WITH_YAC and WITH_XIOS requires " + "XIOS_HAS_MPI_HANDSHAKE. Please rebuild XIOS with " + "MPI handshake support.") + ENDIF() + ENDIF() + FIND_PACKAGE(YAXT) MESSAGE(STATUS " YAXT_FOUND: " "${YAXT_FOUND}") diff --git a/cmake/Modules/FindYAC.cmake b/cmake/Modules/FindYAC.cmake index fb0a057a2f..da77a49bf7 100644 --- a/cmake/Modules/FindYAC.cmake +++ b/cmake/Modules/FindYAC.cmake @@ -51,9 +51,7 @@ IF (YAC_INCLUDE_DIR AND YAC_LIBRARY) # List of YAC static libraries SET(YACLIB_NAMES libyac.a - libyac_utils.a libyac_core.a - libyac_clapack.a libyac_mci.a libyac_mtime.a ) diff --git a/cmake/Modules/testMPIHandshake.cmake b/cmake/Modules/testMPIHandshake.cmake new file mode 100644 index 0000000000..2591e07d9a --- /dev/null +++ b/cmake/Modules/testMPIHandshake.cmake @@ -0,0 +1,28 @@ +MESSAGE(STATUS "Checking whether XIOS supports MPI Handshake") + +# Define the test source code (Fortran code) for the MPI Handshake check +FILE(WRITE "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/test_xios_mpi_handshake.f90" +" +PROGRAM test_mpi_handshake + ! 
Try importing mpi_handshake modules
+  USE xios, ONLY: xios_mpi_handshake, xios_MAX_GROUPNAME_LEN
+  IMPLICIT NONE
+END PROGRAM test_mpi_handshake
+")
+
+set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -I${XIOS_INCLUDE_DIR}")
+TRY_COMPILE(TEST_XIOS_HAS_MPI_HANDSHAKE
+  ${CMAKE_BINARY_DIR}
+  ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/test_xios_mpi_handshake.f90
+  OUTPUT_VARIABLE OUTPUT
+)
+
+# Check if the compilation was successful
+IF(TEST_XIOS_HAS_MPI_HANDSHAKE)
+  message(STATUS "Checking whether XIOS supports MPI Handshake -- yes")
+  set(XIOS_HAS_MPI_HANDSHAKE 1 CACHE BOOL "")
+ELSE(TEST_XIOS_HAS_MPI_HANDSHAKE)
+  message(STATUS "Checking whether XIOS supports MPI Handshake -- no")
+  set(XIOS_HAS_MPI_HANDSHAKE 0 CACHE BOOL "")
+ENDIF(TEST_XIOS_HAS_MPI_HANDSHAKE)
+MARK_AS_ADVANCED(XIOS_HAS_MPI_HANDSHAKE)
diff --git a/fem/src/CMakeLists.txt b/fem/src/CMakeLists.txt
index c992bee3e8..b51fba58d3 100644
--- a/fem/src/CMakeLists.txt
+++ b/fem/src/CMakeLists.txt
@@ -48,12 +48,6 @@ SET(solverlib_SOURCES AddrFunc.F90 NavierStokes.F90 NavierStokesGeneral.F90
 #SET_PROPERTY(SOURCE MaxwellAxiS.F90 PROPERTY
 #  COMPILE_DEFINITIONS FULL_INDUCTION)
 
-IF(MPI_Fortran_HAVE_F90_MODULE)
-  LIST(APPEND solverlib_SOURCES mo_mpi_handshake.F90 mpi_handshake.c)
-ELSE()
-  LIST(APPEND solverlib_SOURCES mo_mpi_handshake_stub.F90)
-ENDIF()
-
 IF(WITH_YAC)
   LIST(APPEND solverlib_SOURCES elmer_coupling.F90)
 ENDIF()
diff --git a/fem/src/Messages.F90 b/fem/src/Messages.F90
index 4f318b7954..c6fd865277 100644
--- a/fem/src/Messages.F90
+++ b/fem/src/Messages.F90
@@ -52,12 +52,11 @@ MODULE Messages
 
 #ifdef HAVE_XIOS
-  USE XIOS
+  USE XIOS, ONLY: xios_context_finalize, xios_finalize
 #endif
 
 #ifdef HAVE_YAC
-  USE elmer_coupling
-  USE elmer_icon_coupling
+  USE elmer_coupling, ONLY: coupling_finalize
 #endif
 
 IMPLICIT NONE
@@ -75,13 +74,8 @@ MODULE Messages
 
   INTEGER, PARAMETER :: EXIT_OK=0, EXIT_ERROR=1
 
-#ifdef HAVE_XIOS
   LOGICAL :: USE_XIOS = .FALSE.
-#endif
-
-#ifdef HAVE_YAC
   LOGICAL :: USE_YAC = .FALSE.
-#endif
 
 CONTAINS
 
diff --git a/fem/src/SParIterComm.F90 b/fem/src/SParIterComm.F90
index 6c25dc4f84..049d1e46eb 100644
--- a/fem/src/SParIterComm.F90
+++ b/fem/src/SParIterComm.F90
@@ -49,8 +49,10 @@ MODULE SParIterComm
    USE LoadMod, ONLY : RealTime
    USE SParIterGlobals
 
-#ifdef HAVE_XIOS
-  USE XIOS
+! always use mpi_handshake if YAC is involved; if only XIOS is used, use
+! mpi_handshake if XIOS requires it
+#if defined(HAVE_YAC) || (defined(HAVE_XIOS) && defined(XIOS_HAS_MPI_HANDSHAKE))
+# define ELMER_USE_MPI_HANDSHAKE
 #endif
 
 #ifndef HAVE_PARMMG
 # endif
 #endif
 
+#ifdef ELMER_USE_MPI_HANDSHAKE
+  ! do some compatibility checks first
+# ifdef ELMER_COLOUR
+# error "It looks like you are trying to use ELMER_COLOUR and mpi_handshake"
+# error "at the same time. These features are incompatible. Please review"
+# error "your configuration and dependencies."
+# endif
+
+# if defined(HAVE_XIOS) && !defined(XIOS_HAS_MPI_HANDSHAKE)
+# error "XIOS does not offer the MPI Handshake API (XIOS_HAS_MPI_HANDSHAKE"
+# error "is unset), but ELMER_USE_MPI_HANDSHAKE is set."
+# error "This is incompatible with YAC."
+# endif
+
+  ! import mpi_handshake from YAC or XIOS
+# ifdef HAVE_XIOS
+  USE XIOS, ONLY: xios_get_global_id
+
+  ! prefer mpi_handshake from XIOS if XIOS is used
+  USE XIOS, ONLY: mpi_handshake => xios_mpi_handshake, &
+       MAX_GROUPNAME_LEN => xios_MAX_GROUPNAME_LEN
+# else
+  ! 
use mpi_handshake from YAC if only HAVE_YAC used without HAVE_XIOS + USE elmer_coupling, ONLY: mpi_handshake, MAX_GROUPNAME_LEN +# endif +#endif + +#ifdef HAVE_XIOS + USE XIOS, ONLY: xios_initialize, xios_context_finalize, xios_finalize +#endif + #ifdef HAVE_YAC - USE elmer_coupling, ONLY: coupling_init, coupling_finalize, coupling_setup, & - mpi_handshake, MAX_GROUPNAME_LEN - USE elmer_icon_coupling -#elif defined(ELMER_HAVE_MPI_MODULE) - ! If YAC is not used, use the mpi_handshake from mo_mpi_handshake.F90 - USE mo_mpi_handshake, ONLY: mpi_handshake, MAX_GROUPNAME_LEN -#else - ! If no MPI is present use a stub - USE mo_mpi_handshake_stub, ONLY: mpi_handshake, MAX_GROUPNAME_LEN + USE elmer_coupling, ONLY: & + coupling_init, & + coupling_finalize, & + coupling_setup, & + coupler_get_code_id #endif IMPLICIT NONE @@ -94,6 +123,7 @@ MODULE SParIterComm INCLUDE "mpif.h" #endif +#ifdef ELMER_USE_MPI_HANDSHAKE ! Used for MPI_handshake ! Classify communicator groups with labels INTEGER, PARAMETER :: MAX_NUM_GROUPS = 3 @@ -112,7 +142,7 @@ MODULE SParIterComm ! Group for ranks using XIOS, i.e. only Elmer (XIOS clients) + XIOS server INTEGER :: XIOS_GROUP_IDX = -1 CHARACTER(LEN=MAX_GROUPNAME_LEN) :: XIOS_LABEL - +#endif TYPE Buff_t REAL(KIND=dp), ALLOCATABLE :: rbuf(:) @@ -255,16 +285,15 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) CALL MPI_COMM_SIZE( MPI_COMM_WORLD, ParEnv % PEs, ierr ) CALL MPI_COMM_RANK( MPI_COMM_WORLD, ParEnv % MyPE, ierr ) -! Use mpi_handshake for comm splitting -! TODO how to make sure that mpi_handshake does not conflict with MPI_COMM_SPLIT based on ELMER_COLOUR? - -! Add Elmer group for comm splitting -NUM_GROUPS = NUM_GROUPS + 1 -ELMER_GROUP_IDX = NUM_GROUPS -CALL SetExecID() -GROUP_NAMES(ELMER_GROUP_IDX) = TRIM(ExecID) +#ifdef ELMER_USE_MPI_HANDSHAKE + ! Use mpi_handshake for comm splitting + ! Add Elmer group for comm splitting + NUM_GROUPS = NUM_GROUPS + 1 + ELMER_GROUP_IDX = NUM_GROUPS + CALL SetExecID() + GROUP_NAMES(ELMER_GROUP_IDX) = TRIM(ExecID) -#ifdef HAVE_XIOS +# ifdef HAVE_XIOS INQUIRE(FILE="iodef.xml", EXIST=USE_XIOS) ! add XIOS group for comm splitting IF (USE_XIOS) THEN @@ -275,70 +304,79 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) XIOS_GROUP_IDX = NUM_GROUPS GROUP_NAMES(XIOS_GROUP_IDX) = XIOS_LABEL ENDIF -#endif +# endif -#ifdef HAVE_YAC +# ifdef HAVE_YAC ! check config file and set flag USE_YAC WRITE(config_file,*) "coupling.yaml" INQUIRE(FILE="coupling.yaml", EXIST=USE_YAC) ! add YAC group for comm splitting IF (USE_YAC) THEN - ! Query mpi_handshake group label from coupler - CALL coupler_get_code_id(COUPLER_LABEL) - ! Add Group for coupler - NUM_GROUPS = NUM_GROUPS + 1 - COUPLER_GROUP_IDX = NUM_GROUPS - GROUP_NAMES(COUPLER_GROUP_IDX) = COUPLER_LABEL + ! Query mpi_handshake group label from coupler + CALL coupler_get_code_id(COUPLER_LABEL) + ! Add Group for coupler + NUM_GROUPS = NUM_GROUPS + 1 + COUPLER_GROUP_IDX = NUM_GROUPS + GROUP_NAMES(COUPLER_GROUP_IDX) = COUPLER_LABEL ENDIF -#endif +# endif -IF (NUM_GROUPS > MAX_NUM_GROUPS) THEN - WRITE( Message, * ) 'Too many communication groups defined.' + IF (NUM_GROUPS > MAX_NUM_GROUPS) THEN + WRITE( Message,'(A)') 'Too many communication groups defined.' CALL Fatal( 'ParCommInit', Message ) -ENDIF + ENDIF + + IF (USE_XIOS .OR. USE_YAC) THEN + CALL mpi_handshake(MPI_COMM_WORLD, GROUP_NAMES(1:NUM_GROUPS),& + GROUP_COMMS(1:NUM_GROUPS)) + ! Set ELMER_COMM_WORLD determined through mpi_handshake + ELMER_COMM_WORLD = GROUP_COMMS(ELMER_GROUP_IDX) + ELSE +#endif + +! 
The colour could be set to be some different if we want to couple +! ElmerSolver with some other software having MPI colour set to zero. +#ifndef ELMER_COLOUR +#define ELMER_COLOUR 0 +#endif -! Do comm splitting using handshake -CALL mpi_handshake(MPI_COMM_WORLD, GROUP_NAMES(1:NUM_GROUPS), GROUP_COMMS(1:NUM_GROUPS)) +CALL MPI_COMM_SPLIT(MPI_COMM_WORLD,ELMER_COLOUR,& +ParEnv % MyPE,ELMER_COMM_WORLD,ierr) -ELMER_COMM_WORLD = GROUP_COMMS(ELMER_GROUP_IDX) ! Set ELMER_COMM_WORLD determined through mpi_handshake +#ifdef ELMER_USE_MPI_HANDSHAKE + ENDIF +#endif ! Use XIOS library for IO ! Must have xios and iodef.xml present #ifdef HAVE_XIOS - INQUIRE(FILE="iodef.xml", EXIST=USE_XIOS) IF (USE_XIOS) THEN - WRITE(Message,*) "Using XIOS with config-file: iodef.xml" - CALL INFO("SparIterComm",Message,Level=25) - CALL SetExecID() - CALL xios_initialize(TRIM(ExecID), global_comm=GROUP_COMMS(XIOS_GROUP_IDX)) - ELSE -#ifndef ELMER_COLOUR -#define ELMER_COLOUR 0 -#endif - ! TODO potential incompatibility with MPI_Handshake - CALL MPI_COMM_SPLIT(MPI_COMM_WORLD,ELMER_COLOUR,& - ParEnv % MyPE,ELMER_COMM_WORLD,ierr) + WRITE( Message,'(A)') 'Using XIOS with config-file: iodef.xml' + CALL INFO("SparIterComm",Message,Level=25) + CALL SetExecID() +# ifdef ELMER_USE_MPI_HANDSHAKE + CALL xios_initialize( & + TRIM(ExecID), & + global_comm=GROUP_COMMS(XIOS_GROUP_IDX)) +# else + CALL xios_initialize( & + TRIM(ExecID), & + return_comm=ELMER_COMM_WORLD) +# endif ENDIF -#else - ! The colour could be set to be some different if we want to couple ElmerSolver with some other - ! software having MPI colour set to zero. -#ifndef ELMER_COLOUR -#define ELMER_COLOUR 0 #endif - CALL MPI_COMM_SPLIT(MPI_COMM_WORLD,ELMER_COLOUR,& - ParEnv % MyPE,ELMER_COMM_WORLD,ierr) -#endif -! Use YAC library for coupling -! #ifdef HAVE_YAC - INQUIRE(FILE="coupling.yaml", EXIST=USE_YAC) IF (USE_YAC) THEN WRITE(Message,'(A,A)') "Using YAC coupler with config-file:",TRIM(config_file) CALL INFO("SparIterComm",Message,Level=25) - CALL coupling_init(config_file, ELMER_COMM_WORLD, GROUP_COMMS(COUPLER_GROUP_IDX)) - END IF -#endif + ! TODO: Refactor to also provide GROUP_NAMES(XIOS_GROUP_IDX) here + ! CALL coupling_init("coupling.yaml", ELMER_COMM_WORLD,& + ! GROUP_COMMS(COUPLER_GROUP_IDX), GROUP_NAMES(ELMER_GROUP_IDX)) + CALL coupling_init("coupling.yaml", ELMER_COMM_WORLD,& + GROUP_COMMS(COUPLER_GROUP_IDX)) + ENDIF +#endif ParEnv % ActiveComm = ELMER_COMM_WORLD diff --git a/fem/src/elmer_coupling.F90 b/fem/src/elmer_coupling.F90 index 35a7a906bb..abb720b7df 100644 --- a/fem/src/elmer_coupling.F90 +++ b/fem/src/elmer_coupling.F90 @@ -286,6 +286,8 @@ MODULE elmer_coupling INTEGER, PARAMETER, PRIVATE :: MAX_CHARLEN = 132 INTEGER, PARAMETER, PUBLIC :: MAX_GROUPNAME_LEN = MAX_CHARLEN + ! TODO: Allow to set component name from outside + ! CHARACTER(LEN=MAX_CHARLEN) :: ELMER_COMP_NAME ! to make sure to have a single YAML file in case of multiple Elmer/Ice domains CHARACTER(LEN=MAX_CHARLEN), PARAMETER :: ELMER_COMP_NAME = "elmerice" CHARACTER(LEN=MAX_CHARLEN), PARAMETER :: ELMER_GRID_NAME = "elmer_grid" @@ -315,6 +317,8 @@ SUBROUTINE coupler_get_code_id(label) #endif END SUBROUTINE coupler_get_code_id + ! TODO: Refactor to also accept comp_name here + ! 
SUBROUTINE coupling_init(coupling_config_file, elmer_comm, yac_comm, comp_name) SUBROUTINE coupling_init(coupling_config_file, elmer_comm, yac_comm) IMPLICIT NONE @@ -323,6 +327,7 @@ SUBROUTINE coupling_init(coupling_config_file, elmer_comm, yac_comm) INTEGER, INTENT(IN) :: elmer_comm INTEGER, INTENT(IN) :: yac_comm + ! CHARACTER(LEN=MAX_GROUPNAME_LEN), INTENT(IN) :: comp_name INTEGER :: ierror @@ -345,6 +350,8 @@ SUBROUTINE coupling_init(coupling_config_file, elmer_comm, yac_comm) ! define component ! * is collective operation for all processes that initialised YAC + ! TODO: Allow to set component name from outside; currently hard-coded + ! ELMER_COMP_NAME = comp_name CALL yac_fdef_comp(ELMER_COMP_NAME, comp_id) ! get number of ranks for the elmer component @@ -357,7 +364,7 @@ END SUBROUTINE coupling_init SUBROUTINE coupling_setup(grid_dir, num_parts, timestepstring) USE :: elmer_icon_coupling - USE, INTRINSIC :: iso_c_binding + USE, INTRINSIC :: iso_c_binding, ONLY: C_INT, C_DOUBLE, C_PTR, C_F_POINTER, C_NULL_CHAR IMPLICIT NONE @@ -401,7 +408,7 @@ SUBROUTINE coupling_setup(grid_dir, num_parts, timestepstring) SUBROUTINE free_c ( ptr ) bind ( c, NAME='free' ) - USE, INTRINSIC :: iso_c_binding + USE, INTRINSIC :: iso_c_binding, ONLY: C_PTR TYPE(C_PTR), VALUE :: ptr diff --git a/fem/src/mo_mpi_handshake.F90 b/fem/src/mo_mpi_handshake.F90 deleted file mode 100644 index 4a3e02fe21..0000000000 --- a/fem/src/mo_mpi_handshake.F90 +++ /dev/null @@ -1,42 +0,0 @@ -MODULE mo_mpi_handshake - - - PUBLIC MAX_GROUPNAME_LEN - INTEGER, PARAMETER :: MAX_GROUPNAME_LEN = 64 - -CONTAINS - - SUBROUTINE mpi_handshake ( comm, group_names, group_comms ) - use, intrinsic :: iso_c_binding, only : c_ptr, c_char, c_null_char, c_loc - - implicit none - - interface - SUBROUTINE mpi_handshake_c2f (n, group_names, group_comms, comm) & - bind ( c, name='mpi_handshake_c2f') - use, intrinsic :: iso_c_binding, only : c_ptr, c_int - implicit none - integer(c_int), intent(in), value :: n - type (c_ptr) , intent(in) :: group_names(n) - integer, intent(out) :: group_comms(n) - integer, intent(in), value :: comm - end subroutine mpi_handshake_c2f - end interface - - integer, intent(in) :: comm - character(len=MAX_GROUPNAME_LEN), intent(in) :: group_names(:) - integer, intent(inout) :: group_comms(SIZE(group_names)) - - CHARACTER (kind=c_char, len=MAX_GROUPNAME_LEN), TARGET :: group_names_cpy(SIZE(group_names)) - type( c_ptr ) :: group_name_ptr(SIZE(group_names)) - integer :: i - DO i=1,SIZE(group_names) - group_names_cpy(i) = TRIM(group_names(i)) // c_null_char - group_name_ptr(i) = c_loc(group_names_cpy(i)) - END DO - - call mpi_handshake_c2f(SIZE(group_names), group_name_ptr, group_comms, comm) - - end subroutine mpi_handshake - -END MODULE mo_mpi_handshake diff --git a/fem/src/mo_mpi_handshake_stub.F90 b/fem/src/mo_mpi_handshake_stub.F90 deleted file mode 100644 index cf050bca4a..0000000000 --- a/fem/src/mo_mpi_handshake_stub.F90 +++ /dev/null @@ -1,19 +0,0 @@ -MODULE mo_mpi_handshake_stub - - - PUBLIC MAX_GROUPNAME_LEN - INTEGER, PARAMETER :: MAX_GROUPNAME_LEN = 64 - -CONTAINS - - SUBROUTINE mpi_handshake ( comm, group_names, group_comms ) - - implicit none - - integer, intent(in) :: comm - character(len=MAX_GROUPNAME_LEN), intent(in) :: group_names(:) - integer, intent(inout) :: group_comms(SIZE(group_names)) - - end subroutine mpi_handshake - -END MODULE mo_mpi_handshake_stub diff --git a/fem/src/mpi_handshake.c b/fem/src/mpi_handshake.c deleted file mode 100644 index 5143cbce3c..0000000000 --- a/fem/src/mpi_handshake.c +++ 
/dev/null @@ -1,172 +0,0 @@ -/* - * Keywords: - * Maintainer: Moritz Hanke - * URL: https://gitlab.dkrz.de/dkrz-sw/mpi-handshake - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * Neither the name of the DKRZ GmbH nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS - * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER - * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include - -//taken from http://beige.ucs.indiana.edu/I590/node85.html -static void mh_mpi_error(int error_code, MPI_Comm comm) { - int rank; - MPI_Comm_rank(comm, &rank); - - char error_string[MPI_MAX_ERROR_STRING]; - int length_of_error_string, error_class; - - MPI_Error_class(error_code, &error_class); - MPI_Error_string(error_class, error_string, &length_of_error_string); - fprintf(stderr, "%3d: %s\n", rank, error_string); - MPI_Abort(comm, error_code); -} - -static void mh_abort( - const char * error_string, MPI_Comm comm, const char * file, int line) { - - int rank; - MPI_Comm_rank(comm, &rank); - - fprintf(stderr, "%3d: %s\n", rank, error_string); - fprintf(stderr, "Aborting in file %s, line %i ...\n", file, line ); - MPI_Abort(comm, EXIT_FAILURE); -} - -#define mh_mpi_call(call, comm) \ - do { \ - int error_code = (call); \ - if (error_code != MPI_SUCCESS) \ - mh_mpi_error(error_code, comm); \ - } while(0) - -#define mh_assert(exp, msg, comm) \ - {if(!((exp))) mh_abort(((msg)), ((comm)), __FILE__, __LINE__);} - -void mpi_handshake( - char const ** group_names, MPI_Comm * group_comms, size_t n, MPI_Comm comm) { - - // check whether comm is an intercomm - int is_intercomm; - mh_mpi_call(MPI_Comm_test_inter(comm, &is_intercomm), comm); - mh_assert( - !is_intercomm, - "ERROR(mpi_handshake): inter-communicators are not supported", comm); - - // STEP 1: Version exchange - enum {MPI_HANDSHAKE_VERSION = 1}; - int mh_version = MPI_HANDSHAKE_VERSION; - mh_mpi_call( - MPI_Allreduce( - MPI_IN_PLACE, &mh_version, 1, MPI_INT, MPI_MIN, comm), comm); - mh_assert( - mh_version == MPI_HANDSHAKE_VERSION, - "ERROR(mpi_handshake): version mismatch." 
- "This implementation of the MPI handshake only supports version 1", comm); - - // generate communicators - int rank, size; - MPI_Comm_rank(comm, &rank); - MPI_Comm_size(comm, &size); - for (size_t i = 0; i < n; ++i) group_comms[i] = MPI_COMM_NULL; - - while(1){ - // STEP 2: determine broadcasting rank - size_t group_idx = SIZE_MAX; - for (size_t i = 0; (i < n) && (group_idx == SIZE_MAX); ++i) - if (group_comms[i] == MPI_COMM_NULL) group_idx = i; - int broadcasting_rank = group_idx != SIZE_MAX ? rank : size; - mh_mpi_call( - MPI_Allreduce( - MPI_IN_PLACE, &broadcasting_rank, 1, MPI_INT, MPI_MIN, comm), comm); - mh_assert(broadcasting_rank >= 0 && broadcasting_rank <= size, - "ERROR(mpi_handshake): " - "broadcasting rank cannot be negativ or greater than communicator size.", - comm); - if (broadcasting_rank == size) break; - - // STEP 3: broadcast group name - int group_name_buffer_size = 0; - if(broadcasting_rank == rank){ - size_t len = strlen(group_names[group_idx]); - mh_assert(len <= INT_MAX, - "ERROR(yac_mpi_handshake): group name is too long", comm); - group_name_buffer_size = (int)len; - } - MPI_Bcast(&group_name_buffer_size, 1, MPI_INT, broadcasting_rank, comm); - char * group_name_buffer = - malloc((group_name_buffer_size+1) * sizeof(*group_name_buffer)); - mh_assert( - group_name_buffer, - "ERROR(mpi_handshake): failed to allocate group name buffer", comm); - if (broadcasting_rank == rank) - strcpy(group_name_buffer, group_names[group_idx]); - mh_mpi_call( - MPI_Bcast( - group_name_buffer, group_name_buffer_size, MPI_CHAR, broadcasting_rank, comm), comm); - group_name_buffer[group_name_buffer_size] = '\0'; - - // STEP 4: split communicator - group_idx = SIZE_MAX; - for (size_t i = 0; (i < n) && (group_idx == SIZE_MAX); ++i) - if (!strcmp(group_name_buffer, group_names[i])){ - mh_assert(group_comms[i] == MPI_COMM_NULL, - "ERROR(yac_mpi_handshake): " - "Group communicator for a group that was already created " - "was broadcasted again.", comm); - group_idx = i; - } - free(group_name_buffer); - MPI_Comm group_comm; - mh_mpi_call( - MPI_Comm_split( - comm, (group_idx != SIZE_MAX)?0:MPI_UNDEFINED, rank, &group_comm), - comm); - if (group_idx != SIZE_MAX) { - group_comms[group_idx] = group_comm; - } - } -} - -void mpi_handshake_c2f(int n, char const ** group_names, - MPI_Fint * group_comms, MPI_Fint comm) -{ - MPI_Comm comm_c = MPI_Comm_f2c(comm); - MPI_Comm * group_comms_c = malloc(n*sizeof(*group_comms_c)); - mpi_handshake(group_names, group_comms_c, n, comm_c); - for(int i = 0; i Date: Mon, 3 Nov 2025 13:57:31 +0100 Subject: [PATCH 11/22] Prefer END IF over ENDIF. --- fem/src/SParIterComm.F90 | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/fem/src/SParIterComm.F90 b/fem/src/SParIterComm.F90 index bef626728f..df9c2ec395 100644 --- a/fem/src/SParIterComm.F90 +++ b/fem/src/SParIterComm.F90 @@ -303,7 +303,7 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) NUM_GROUPS = NUM_GROUPS + 1 XIOS_GROUP_IDX = NUM_GROUPS GROUP_NAMES(XIOS_GROUP_IDX) = XIOS_LABEL - ENDIF + END IF # endif # ifdef HAVE_YAC @@ -318,13 +318,13 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) NUM_GROUPS = NUM_GROUPS + 1 COUPLER_GROUP_IDX = NUM_GROUPS GROUP_NAMES(COUPLER_GROUP_IDX) = COUPLER_LABEL - ENDIF + END IF # endif IF (NUM_GROUPS > MAX_NUM_GROUPS) THEN WRITE( Message,'(A)') 'Too many communication groups defined.' CALL Fatal( 'ParCommInit', Message ) - ENDIF + END IF IF (USE_XIOS .OR. 
USE_YAC) THEN CALL mpi_handshake(MPI_COMM_WORLD, GROUP_NAMES(1:NUM_GROUPS),& @@ -344,7 +344,7 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) ParEnv % MyPE,ELMER_COMM_WORLD,ierr) #ifdef ELMER_USE_MPI_HANDSHAKE - ENDIF + END IF #endif ! Use XIOS library for IO @@ -363,7 +363,7 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) TRIM(ExecID), & return_comm=ELMER_COMM_WORLD) # endif - ENDIF + END IF #endif #ifdef HAVE_YAC @@ -970,7 +970,7 @@ SUBROUTINE SParEdgeNumbering( Mesh, Allmesh ) ELSE parentnodes(i,1) = q parentnodes(i,2) = p - ENDIF + END IF ! This is the list of PEs sharing parent node 1: list1 => nb(parentnodes(i,1)) % Neighbours @@ -2248,7 +2248,7 @@ SUBROUTINE SParGlobalNumbering( Mesh, OldMesh, NewNodeCnt, Reorder ) parentnodes(i,2) = l ! This is the list of PEs sharing parent node 2: list2 => Mesh % ParallelInfo % NeighbourList(l) % Neighbours - END IF + END IF END DO END DO ! @@ -4420,7 +4420,7 @@ SUBROUTINE BuildRevVecIndices( SplittedMatrix ) END DO !print*,parenv % mype, ' <<<<-----', sproc, realtime()-tt; flush(6) DEALLOCATE( Gindices ) - END IF + END IF END DO !print*,parenv % mype, 'first recv: ', realTime()-tt @@ -5109,12 +5109,12 @@ SUBROUTINE ParEnvFinalize() CALL coupling_finalize() END IF #endif - + #ifdef HAVE_XIOS IF (USE_XIOS) THEN CALL xios_context_finalize() CALL xios_finalize() - ENDIF + END IF #endif IF (.NOT. ParEnv % ExternalInit) THEN From 272d3d53585a9c6e730d963da343c1398163e4b3 Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Mon, 3 Nov 2025 15:33:45 +0100 Subject: [PATCH 12/22] Revert changes related to elmer_grid. --- fem/src/elmer_coupling.F90 | 34 +++- fem/src/elmer_grid.c | 312 +++++++++++++++++++++++++++++++++++++ fem/src/elmer_grid.h | 43 +++++ 3 files changed, 384 insertions(+), 5 deletions(-) create mode 100644 fem/src/elmer_grid.c create mode 100644 fem/src/elmer_grid.h diff --git a/fem/src/elmer_coupling.F90 b/fem/src/elmer_coupling.F90 index abb720b7df..e9cac87d8e 100644 --- a/fem/src/elmer_coupling.F90 +++ b/fem/src/elmer_coupling.F90 @@ -405,6 +405,30 @@ SUBROUTINE coupling_setup(grid_dir, num_parts, timestepstring) INTERFACE + SUBROUTINE read_grid_c(grid_dir, rank, size, num_parts, & + nbr_vertices, nbr_cells, num_vertices_per_cell, & + x_vertices, y_vertices, x_cells, y_cells, & + cell_ids, vertex_ids, cell_to_vertex) & + bind ( C, name='read_grid' ) + + USE, INTRINSIC :: iso_c_binding + + CHARACTER(KIND=C_CHAR) :: grid_dir(*) + INTEGER(KIND=C_INT), VALUE :: rank + INTEGER(KIND=C_INT), VALUE :: size + INTEGER(KIND=C_INT), VALUE :: num_parts + INTEGER(KIND=C_INT) :: nbr_vertices + INTEGER(KIND=C_INT) :: nbr_cells + TYPE(C_PTR) :: num_vertices_per_cell ! int ** + TYPE(C_PTR) :: x_vertices ! double ** + TYPE(C_PTR) :: y_vertices ! double ** + TYPE(C_PTR) :: x_cells ! double ** + TYPE(C_PTR) :: y_cells ! double ** + TYPE(C_PTR) :: cell_ids ! int ** + TYPE(C_PTR) :: vertex_ids ! int ** + TYPE(C_PTR) :: cell_to_vertex ! int ** + + END SUBROUTINE read_grid_c SUBROUTINE free_c ( ptr ) bind ( c, NAME='free' ) @@ -423,11 +447,11 @@ END SUBROUTINE free_c ! * each process only reads in its local part of the grid ! * in Elmer this information probably already available and does not have ! to be read from file - !CALL read_grid_c( & - ! TRIM(grid_dir) // c_null_char, comm_rank, comm_size, num_parts, & - ! nbr_vertices, nbr_cells, num_vertices_per_cell_c_ptr, & - ! x_vertices_c_ptr, y_vertices_c_ptr, x_cells_c_ptr, y_cells_c_ptr, & - ! 
cell_ids_c_ptr, vertex_ids_c_ptr, cell_to_vertex_c_ptr)
+    CALL read_grid_c( &
+      TRIM(grid_dir) // c_null_char, comm_rank, comm_size, num_parts, &
+      nbr_vertices, nbr_cells, num_vertices_per_cell_c_ptr, &
+      x_vertices_c_ptr, y_vertices_c_ptr, x_cells_c_ptr, y_cells_c_ptr, &
+      cell_ids_c_ptr, vertex_ids_c_ptr, cell_to_vertex_c_ptr)

     CALL C_F_POINTER( &
       num_vertices_per_cell_c_ptr, num_vertices_per_cell_c, shape=[nbr_cells])
diff --git a/fem/src/elmer_grid.c b/fem/src/elmer_grid.c
new file mode 100644
index 0000000000..2eae42aef6
--- /dev/null
+++ b/fem/src/elmer_grid.c
@@ -0,0 +1,312 @@
+/*
+   Elmer, A Finite Element Software for Multiphysical Problems
+
+   Copyright 1st April 1995 - , CSC - IT Center for Science Ltd., Finland
+
+   This library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   This library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with this library (in file ../../LGPL-2.1); if not, write
+   to the Free Software Foundation, Inc., 51 Franklin Street,
+   Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+/* Authors: Moritz Hanke (DKRZ), adapted by Thomas Zwinger
+   original code provided by Moritz Hanke (DKRZ) in the frame of the TerraDT project
+   Email:   thomas.zwinger@csc.fi
+   Web:     http://www.csc.fi/elmer
+   Address: CSC - IT Center for Science Ltd.
+            Keilaranta 14
+            02101 Espoo, Finland
+
+   Original Date: 10.6.2025
+*/
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <mpi.h>
+
+#include <proj.h>
+
+#include "elmer_grid.h"
+
+struct glb2loc {
+  int global_id, local_id;
+};
+
+static void compute_cell_centers(
+  int nbr_cells, int * cell_to_vertex, int * num_vertices_per_cell,
+  double * x_vertices, double * y_vertices,
+  double * x_cells, double * y_cells);
+
+static int compare_glb2loc_glb (const void * a, const void * b) {
+  return ((struct glb2loc*)a)->global_id - ((struct glb2loc*)b)->global_id;
+}
+
+static void convert2rad(
+  double * x_vertices, double * y_vertices, int nbr_vertices) {
+
+  // define transformation
+  PJ * P =
+    proj_create_crs_to_crs(
+      PJ_DEFAULT_CTX, "EPSG:3413", "+proj=longlat +datum=WGS84", NULL);
+
+  if (!P) {
+    fputs("failed to create transformation\n", stderr);
+    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+  }
+
+  // transform all vertices
+  for (int i = 0; i < nbr_vertices; ++i) {
+    PJ_COORD src_coord = proj_coord(x_vertices[i], y_vertices[i], 0, 0);
+    PJ_COORD tgt_coord = proj_trans(P, PJ_FWD, src_coord);
+    x_vertices[i] = proj_torad(tgt_coord.lp.lam);
+    y_vertices[i] = proj_torad(tgt_coord.lp.phi);
+  }
+
+  // clean up
+  proj_destroy(P);
+}
+
+void read_grid(
+  char const * grid_dir, int rank, int size, int num_parts,
+  int * nbr_vertices, int * nbr_cells, int ** num_vertices_per_cell,
+  double ** x_vertices, double ** y_vertices,
+  double ** x_cells, double ** y_cells,
+  int ** cell_ids, int ** vertex_ids, int ** cell_to_vertex) {
+
+  enum {NUM_VERT_PER_CELL = 3};
+
+  *nbr_vertices = 0;
+  *nbr_cells = 0;
+  *num_vertices_per_cell = NULL;
+  *x_vertices = NULL;
+  *y_vertices = NULL;
+  *cell_ids = NULL;
+  *vertex_ids = NULL;
+  *cell_to_vertex = NULL;
+
+  if (!grid_dir) {
+
+    fputs("invalid grid_dir\n", stderr);
+    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+  }
+
+  // distribute grid partitions across available ranks
+  int start_part_idx =
+    ((long long)num_parts * (long long)rank) / (long long)size;
+  int next_start_part_idx =
+    ((long long)num_parts * (long long)(rank+1)) / (long long)size;
+
+  struct glb2loc * glb2loc_vert = NULL;
+  struct glb2loc * glb2loc_cell_vert = NULL;
+
+  size_t grid_dir_len = strlen(grid_dir);
+  char header_filename[grid_dir_len + 64];
+  char nodes_filename[grid_dir_len + 64];
+  char elements_filename[grid_dir_len + 64];
+
+  // for all grid partitions of the local rank
+  for (int part_idx = start_part_idx; part_idx < next_start_part_idx;
+       ++part_idx) {
+
+    // open files of current grid partition
+
+    sprintf(header_filename, "%s/partitioning.%d/part.%d.header", grid_dir, num_parts, part_idx+1);
+    FILE * header_file = fopen(header_filename, "r");
+    sprintf(nodes_filename, "%s/partitioning.%d/part.%d.nodes", grid_dir, num_parts, part_idx+1);
+    FILE * nodes_file = fopen(nodes_filename, "r");
+    sprintf(elements_filename, "%s/partitioning.%d/part.%d.elements", grid_dir, num_parts, part_idx+1);
+    FILE * elements_file = fopen(elements_filename, "r");
+
+    if (!header_file || !nodes_file || !elements_file) {
+      fprintf(
+        stderr,"could not open grid files\n"
+        "header_file: %s %p\n"
+        "nodes_file: %s %p\n"
+        "elements_file: %s %p\n",
+        header_filename, header_file,
+        nodes_filename, nodes_file,
+        elements_filename, elements_file);
+      MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+    }
+
+    // read header
+    int part_nbr_vertices, part_nbr_cells, part_nbr_edges;
+    fscanf(
+      header_file, "%d%d%d",
+      &part_nbr_vertices, &part_nbr_cells, &part_nbr_edges);
+
+    // allocate arrays for nodes
+    *x_vertices =
+      realloc(
+        *x_vertices,
+        (size_t)(*nbr_vertices + part_nbr_vertices) * sizeof(**x_vertices));
+    *y_vertices =
+      realloc(
+        *y_vertices,
+        (size_t)(*nbr_vertices + part_nbr_vertices) * sizeof(**y_vertices));
+    *vertex_ids =
+      realloc(
+        *vertex_ids,
+        (size_t)(*nbr_vertices + part_nbr_vertices) * sizeof(**vertex_ids));
+    glb2loc_vert =
+      realloc(
+        glb2loc_vert,
+        (size_t)(*nbr_vertices + part_nbr_vertices) * sizeof(*glb2loc_vert));
+
+    // read node data
+    for (int i = 0, j = *nbr_vertices; i < part_nbr_vertices; ++i, ++j) {
+      int dummy;
+      double z_vertices;
+      fscanf(
+        nodes_file, "%d%d%lf%lf%lf\n",
+        (*vertex_ids) + j, &dummy,
+        (*x_vertices) + j, *y_vertices + j, &z_vertices);
+      glb2loc_vert[j].global_id = (*vertex_ids)[j];
+      glb2loc_vert[j].local_id = j;
+    }
+    *nbr_vertices += part_nbr_vertices;
+
+    // allocate arrays for elements
+    *num_vertices_per_cell =
+      realloc(
+        *num_vertices_per_cell,
+        (size_t)(*nbr_cells + part_nbr_cells) * sizeof(**num_vertices_per_cell));
+    *cell_ids =
+      realloc(
+        *cell_ids,
+        (size_t)(*nbr_cells + part_nbr_cells) * sizeof(**cell_ids));
+    *cell_to_vertex =
+      realloc(
+        *cell_to_vertex,
+        (size_t)((*nbr_cells + part_nbr_cells) * NUM_VERT_PER_CELL) *
+        sizeof(**cell_to_vertex));
+    glb2loc_cell_vert =
+      realloc(
+        glb2loc_cell_vert,
+        (size_t)((*nbr_cells + part_nbr_cells) * NUM_VERT_PER_CELL) *
+        sizeof(*glb2loc_cell_vert));
+
+    // read element data
+    for (int i = 0, j = *nbr_cells; i < part_nbr_cells; ++i, ++j) {
+      (*num_vertices_per_cell)[j] = NUM_VERT_PER_CELL;
+      int dummy_a, dummy_b;
+      fscanf(
+        elements_file, "%d%d%d%d%d%d\n",
+        (*cell_ids) + j, &dummy_a, &dummy_b,
+        &(glb2loc_cell_vert[3 * j + 0].global_id),
+        &(glb2loc_cell_vert[3 * j + 1].global_id),
+        &(glb2loc_cell_vert[3 * j + 2].global_id));
+      glb2loc_cell_vert[3 * j + 0].local_id = 3 * j + 0;
+      glb2loc_cell_vert[3 * j + 1].local_id = 3 * j + 1;
+      glb2loc_cell_vert[3 * j + 2].local_id = 3 * j + 2;
+    }
+    *nbr_cells += part_nbr_cells;
+
+    // close files
+    fclose(elements_file);
+    fclose(nodes_file);
+    fclose(header_file);
+  }
+
+  // convert coordinates to radians
+  convert2rad(*x_vertices, *y_vertices, *nbr_vertices);
+
+  // sort global to local lookup by global ids
+  qsort(
+    glb2loc_vert, (size_t)*nbr_vertices, sizeof(*glb2loc_vert),
+    compare_glb2loc_glb);
+  qsort(
+    glb2loc_cell_vert, NUM_VERT_PER_CELL * (size_t)*nbr_cells,
+    sizeof(*glb2loc_cell_vert),
+    compare_glb2loc_glb);
+
+  // for all cell vertices -> map global cell vertices to local ones
+  for (size_t i = 0, j = 0; i < NUM_VERT_PER_CELL * *nbr_cells; ++i) {
+
+    while ((j < *nbr_vertices) &&
+           (glb2loc_cell_vert[i].global_id > glb2loc_vert[j].global_id)) ++j;
+
+    if ((j >= *nbr_vertices) ||
+        (glb2loc_cell_vert[i].global_id != glb2loc_vert[j].global_id)) {
+
+      fputs("could not match cell vertices to list of vertices\n", stderr);
+      MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+    }
+
+    (*cell_to_vertex)[glb2loc_cell_vert[i].local_id] = glb2loc_vert[j].local_id;
+  }
+
+  free(glb2loc_cell_vert);
+  free(glb2loc_vert);
+
+  // compute cell centers from vertex coordinates
+  *x_cells = malloc(*nbr_cells * sizeof(**x_cells));
+  *y_cells = malloc(*nbr_cells * sizeof(**y_cells));
+  compute_cell_centers(
+    *nbr_cells, *cell_to_vertex, *num_vertices_per_cell,
+    *x_vertices, *y_vertices, *x_cells, *y_cells);
+}
+
+static inline void LLtoXYZ(double lon, double lat, double p_out[]) {
+
+  while (lon < -M_PI) lon += 2.0 * M_PI;
+  while (lon >= M_PI) lon -= 2.0 * M_PI;
+
+  double cos_lat = cos(lat);
+  p_out[0] = cos_lat * cos(lon);
+  p_out[1] = cos_lat * sin(lon);
+  p_out[2] = sin(lat);
+}
+
+static inline void XYZtoLL (double const p_in[], double * lon, double * lat) {
+
+  *lon = atan2(p_in[1] , p_in[0]);
+  *lat = M_PI_2 - acos(p_in[2]);
+}
+
+static inline void normalise_vector(double v[]) {
+
+  double norm = 1.0 / sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2]);
+
+  v[0] *= norm;
+  v[1] *= norm;
+  v[2] *= norm;
+}
+
+static void compute_cell_centers(
+  int nbr_cells, int * cell_to_vertex, int * num_vertices_per_cell,
+  double * x_vertices, double * y_vertices,
+  double * x_cells, double * y_cells) {
+
+  for (int i = 0; i < nbr_cells; ++i) {
+
+    int num_vertices = num_vertices_per_cell[i];
+    double cell_center[3] = {0.0, 0.0, 0.0};
+
+    for (int j = 0; j < num_vertices; ++j) {
+      double vertex[3];
+      LLtoXYZ(
+        x_vertices[cell_to_vertex[j]], y_vertices[cell_to_vertex[j]], vertex);
+      cell_center[0] += vertex[0];
+      cell_center[1] += vertex[1];
+      cell_center[2] += vertex[2];
+    }
+
+    normalise_vector(cell_center);
+    XYZtoLL(cell_center, x_cells + i, y_cells + i);
+
+    cell_to_vertex += num_vertices;
+  }
+}
+
diff --git a/fem/src/elmer_grid.h b/fem/src/elmer_grid.h
new file mode 100644
index 0000000000..e3ead3b949
--- /dev/null
+++ b/fem/src/elmer_grid.h
@@ -0,0 +1,43 @@
+/*
+   Elmer, A Finite Element Software for Multiphysical Problems
+
+   Copyright 1st April 1995 - , CSC - IT Center for Science Ltd., Finland
+
+   This library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   This library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library (in file ../../LGPL-2.1); if not, write + to the Free Software Foundation, Inc., 51 Franklin Street, + Fifth Floor, Boston, MA 02110-1301 USA +*/ + +/* Authors: Moritz Hanke (DKRZ), adapted by Thomas Zwinger + original code provided by Moritz Hanke (DKRZ) in the frame of the TerraDT project + Email: thomas.zwinger@csc.fi + Web: http://www.csc.fi/elmer + Address: CSC - IT Center for Science Ltd. + Keilaranta 14 + 02101 Espoo, Finland + + Original Date: 10.6.2025 +*/ + +#ifndef ELMER_GRID_H +#define ELMER_GRID_H + +void read_grid( + char const * grid_dir, int rank, int size, int num_parts, + int * nbr_vertices, int * nbr_cells, int ** num_vertices_per_cell, + double ** x_vertices, double ** y_vertices, + double ** x_cells, double ** y_cells, + int ** cell_ids, int ** vertex_ids, int ** cell_to_vertex); + +#endif // ELMER_GRID_H From ffd155b19d0302f7957f2402b60e1499fabea147 Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Mon, 3 Nov 2025 16:15:41 +0100 Subject: [PATCH 13/22] Fix CMake. --- fem/src/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fem/src/CMakeLists.txt b/fem/src/CMakeLists.txt index b51fba58d3..100afe7df3 100644 --- a/fem/src/CMakeLists.txt +++ b/fem/src/CMakeLists.txt @@ -49,7 +49,7 @@ SET(solverlib_SOURCES AddrFunc.F90 NavierStokes.F90 NavierStokesGeneral.F90 # COMPILE_DEFINITIONS FULL_INDUCTION) IF(WITH_YAC) - LIST(APPEND solverlib_SOURCES elmer_coupling.F90) + LIST(APPEND solverlib_SOURCES elmer_grid.c elmer_coupling.F90) ENDIF() From 05c1cdea28d152b40459335b6cc95993599c720d Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Mon, 3 Nov 2025 16:21:17 +0100 Subject: [PATCH 14/22] Apply suggestions from code review --- fem/src/SParIterComm.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fem/src/SParIterComm.F90 b/fem/src/SParIterComm.F90 index df9c2ec395..22b7fd625a 100644 --- a/fem/src/SParIterComm.F90 +++ b/fem/src/SParIterComm.F90 @@ -308,7 +308,7 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) # ifdef HAVE_YAC ! check config file and set flag USE_YAC - WRITE(config_file,*) "coupling.yaml" + WRITE(config_file,'(A)') "coupling.yaml" INQUIRE(FILE="coupling.yaml", EXIST=USE_YAC) ! add YAC group for comm splitting IF (USE_YAC) THEN @@ -376,7 +376,7 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) CALL coupling_init("coupling.yaml", ELMER_COMM_WORLD,& GROUP_COMMS(COUPLER_GROUP_IDX)) END IF -#endif +#endif ParEnv % ActiveComm = ELMER_COMM_WORLD From 1ca7aaf8f5cf0eeb9d8a14fceeb270d4e8a974e9 Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Mon, 3 Nov 2025 17:41:31 +0100 Subject: [PATCH 15/22] Check for config file outside of handshake. 
* Avoids (potential) bug if XIOS is used without handshake * Not necessary for YAC but still easier to understand this way --- fem/src/SParIterComm.F90 | 54 ++++++++++++++++++++++++++++------------ 1 file changed, 38 insertions(+), 16 deletions(-) diff --git a/fem/src/SParIterComm.F90 b/fem/src/SParIterComm.F90 index 22b7fd625a..7f39599395 100644 --- a/fem/src/SParIterComm.F90 +++ b/fem/src/SParIterComm.F90 @@ -244,9 +244,9 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) INTEGER :: ierr INTEGER :: req, prov - CHARACTER(LEN=1024) :: config_file + CHARACTER(LEN=*), PARAMETER :: xios_config_file = "iodef.xml" + CHARACTER(LEN=*), PARAMETER :: yac_config_file = "coupling.yaml" - !****************************************************************** ParallelEnv => ParEnv @@ -285,6 +285,26 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) CALL MPI_COMM_SIZE( MPI_COMM_WORLD, ParEnv % PEs, ierr ) CALL MPI_COMM_RANK( MPI_COMM_WORLD, ParEnv % MyPE, ierr ) +#ifdef HAVE_XIOS + WRITE(Message,'(A,A)') "Check for XIOS config file", xios_config_file + INQUIRE(FILE=xios_config_file, EXIST=USE_XIOS) + IF (USE_XIOS) THEN + WRITE(Message,'(A,A)') "Use XIOS with config file", xios_config_file + ELSE + WRITE(Message,'(A)') "XIOS config file not found, not using XIOS" + END IF +#endif + +#ifdef HAVE_YAC + WRITE(Message,'(A,A)') "Check for YAC config file", yac_config_file + INQUIRE(FILE=yac_config_file, EXIST=USE_YAC) + IF (USE_YAC) THEN + WRITE(Message,'(A,A)') "Use YAC with config file", yac_config_file + ELSE + WRITE(Message,'(A)') "YAC config file not found, not using YAC" + END IF +#endif + #ifdef ELMER_USE_MPI_HANDSHAKE ! Use mpi_handshake for comm splitting ! Add Elmer group for comm splitting @@ -294,7 +314,6 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) GROUP_NAMES(ELMER_GROUP_IDX) = TRIM(ExecID) # ifdef HAVE_XIOS - INQUIRE(FILE="iodef.xml", EXIST=USE_XIOS) ! add XIOS group for comm splitting IF (USE_XIOS) THEN ! Query handshake group label from xios @@ -307,17 +326,14 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) # endif # ifdef HAVE_YAC - ! check config file and set flag USE_YAC - WRITE(config_file,'(A)') "coupling.yaml" - INQUIRE(FILE="coupling.yaml", EXIST=USE_YAC) ! add YAC group for comm splitting IF (USE_YAC) THEN - ! Query mpi_handshake group label from coupler - CALL coupler_get_code_id(COUPLER_LABEL) - ! Add Group for coupler - NUM_GROUPS = NUM_GROUPS + 1 - COUPLER_GROUP_IDX = NUM_GROUPS - GROUP_NAMES(COUPLER_GROUP_IDX) = COUPLER_LABEL + ! Query mpi_handshake group label from coupler + CALL coupler_get_code_id(COUPLER_LABEL) + ! Add Group for coupler + NUM_GROUPS = NUM_GROUPS + 1 + COUPLER_GROUP_IDX = NUM_GROUPS + GROUP_NAMES(COUPLER_GROUP_IDX) = COUPLER_LABEL END IF # endif @@ -351,7 +367,10 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) ! Must have xios and iodef.xml present #ifdef HAVE_XIOS IF (USE_XIOS) THEN - WRITE( Message,'(A)') 'Using XIOS with config-file: iodef.xml' + WRITE( Message,'(A)') & + "Using XIOS with config-file:", & + TRIM(xios_config_file) + CALL INFO("SparIterComm",Message,Level=25) CALL SetExecID() # ifdef ELMER_USE_MPI_HANDSHAKE @@ -368,12 +387,15 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) #ifdef HAVE_YAC IF (USE_YAC) THEN - WRITE(Message,'(A,A)') "Using YAC coupler with config-file:",TRIM(config_file) + WRITE(Message,'(A,A)') & + "Using YAC coupler with config-file:", & + TRIM(yac_config_file) + CALL INFO("SparIterComm",Message,Level=25) ! TODO: Refactor to also provide GROUP_NAMES(XIOS_GROUP_IDX) here - ! 
CALL coupling_init("coupling.yaml", ELMER_COMM_WORLD,& + ! CALL coupling_init(yac_config_file, ELMER_COMM_WORLD,& ! GROUP_COMMS(COUPLER_GROUP_IDX), GROUP_NAMES(ELMER_GROUP_IDX)) - CALL coupling_init("coupling.yaml", ELMER_COMM_WORLD,& + CALL coupling_init(yac_config_file, ELMER_COMM_WORLD,& GROUP_COMMS(COUPLER_GROUP_IDX)) END IF #endif From 161bf993b43acd6732204b8b9fcf70b9f8e66774 Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Mon, 3 Nov 2025 17:42:33 +0100 Subject: [PATCH 16/22] Fix warning. --- fem/src/elmer_coupling.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fem/src/elmer_coupling.F90 b/fem/src/elmer_coupling.F90 index e9cac87d8e..bcb615979f 100644 --- a/fem/src/elmer_coupling.F90 +++ b/fem/src/elmer_coupling.F90 @@ -323,7 +323,7 @@ SUBROUTINE coupling_init(coupling_config_file, elmer_comm, yac_comm) IMPLICIT NONE - CHARACTER(LEN=1024), INTENT(IN) :: coupling_config_file + CHARACTER(LEN=*), INTENT(IN) :: coupling_config_file INTEGER, INTENT(IN) :: elmer_comm INTEGER, INTENT(IN) :: yac_comm From 7b0d0396c19babd5919f338ca1ab9d1feb0c91c4 Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Mon, 3 Nov 2025 17:54:39 +0100 Subject: [PATCH 17/22] Check for config files at beginning. * Use PARAMETER for defining config file name at beginning. * Factor checking for config files out since this is less error prone if features are nested. --- fem/src/SParIterComm.F90 | 41 +++++++++++++++++++++++++++++----------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/fem/src/SParIterComm.F90 b/fem/src/SParIterComm.F90 index f7a3949782..5d82492741 100644 --- a/fem/src/SParIterComm.F90 +++ b/fem/src/SParIterComm.F90 @@ -214,7 +214,8 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) INTEGER :: ierr INTEGER :: req, prov - CHARACTER(LEN=1024) :: config_file + CHARACTER(LEN=*), PARAMETER :: yac_config_file = 'coupling.yaml' + CHARACTER(LEN=*), PARAMETER :: xios_config_file = 'iodef.xml' !****************************************************************** @@ -255,6 +256,26 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) CALL MPI_COMM_SIZE( MPI_COMM_WORLD, ParEnv % PEs, ierr ) CALL MPI_COMM_RANK( MPI_COMM_WORLD, ParEnv % MyPE, ierr ) +#ifdef HAVE_XIOS + WRITE(Message,'(A,A)') "Check for XIOS config file", xios_config_file + INQUIRE(FILE=xios_config_file, EXIST=USE_XIOS) + IF (USE_XIOS) THEN + WRITE(Message,'(A,A)') "Use XIOS with config file", xios_config_file + ELSE + WRITE(Message,'(A)') "XIOS config file not found, not using XIOS" + END IF +#endif + +#ifdef HAVE_YAC + WRITE(Message,'(A,A)') "Check for YAC config file", yac_config_file + INQUIRE(FILE=yac_config_file, EXIST=USE_YAC) + IF (USE_YAC) THEN + WRITE(Message,'(A,A)') "Use YAC with config file", yac_config_file + ELSE + WRITE(Message,'(A)') "YAC config file not found, not using YAC" + END IF +#endif + ! Use mpi_handshake for comm splitting ! TODO how to make sure that mpi_handshake does not conflict with MPI_COMM_SPLIT based on ELMER_COLOUR? @@ -265,7 +286,6 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) GROUP_NAMES(ELMER_GROUP_IDX) = TRIM(ExecID) #ifdef HAVE_XIOS - INQUIRE(FILE="iodef.xml", EXIST=USE_XIOS) ! add XIOS group for comm splitting IF (USE_XIOS) THEN ! Query handshake group label from xios @@ -278,9 +298,6 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) #endif #ifdef HAVE_YAC - ! check config file and set flag USE_YAC - WRITE(config_file,'(A)') "coupling.yaml" - INQUIRE(FILE="coupling.yaml", EXIST=USE_YAC) ! add YAC group for comm splitting IF (USE_YAC) THEN ! 
Query mpi_handshake group label from coupler @@ -303,11 +320,12 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) ELMER_COMM_WORLD = GROUP_COMMS(ELMER_GROUP_IDX) ! Set ELMER_COMM_WORLD determined through mpi_handshake ! Use XIOS library for IO -! Must have xios and iodef.xml present +! Must HAVE_XIOS and xios_config_file present #ifdef HAVE_XIOS - INQUIRE(FILE="iodef.xml", EXIST=USE_XIOS) IF (USE_XIOS) THEN - WRITE(Message,*) "Using XIOS with config-file: iodef.xml" + WRITE( Message,'(A)') & + "Using XIOS with config-file:", & + TRIM(xios_config_file) CALL INFO("SparIterComm",Message,Level=25) CALL SetExecID() CALL xios_initialize(TRIM(ExecID), global_comm=GROUP_COMMS(XIOS_GROUP_IDX)) @@ -332,11 +350,12 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) ! Use YAC library for coupling ! #ifdef HAVE_YAC - INQUIRE(FILE="coupling.yaml", EXIST=USE_YAC) IF (USE_YAC) THEN - WRITE(Message,'(A,A)') "Using YAC coupler with config-file:",TRIM(config_file) + WRITE(Message,'(A,A)') & + "Using YAC coupler with config-file:", & + TRIM(yac_config_file) CALL INFO("SparIterComm",Message,Level=25) - CALL coupling_init(TRIM(config_file), ELMER_COMM_WORLD, GROUP_COMMS(COUPLER_GROUP_IDX)) + CALL coupling_init(TRIM(yac_config_file), ELMER_COMM_WORLD, GROUP_COMMS(COUPLER_GROUP_IDX)) END IF #endif From 956ae503c2d5e7719eb1db9bed1930549d9031ed Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Mon, 3 Nov 2025 17:55:59 +0100 Subject: [PATCH 18/22] Fix warning. --- fem/src/elmer_coupling.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fem/src/elmer_coupling.F90 b/fem/src/elmer_coupling.F90 index d358deb442..499d82a35f 100644 --- a/fem/src/elmer_coupling.F90 +++ b/fem/src/elmer_coupling.F90 @@ -319,7 +319,7 @@ SUBROUTINE coupling_init(coupling_config_file, elmer_comm, yac_comm) IMPLICIT NONE - CHARACTER(LEN=1024), INTENT(IN) :: coupling_config_file + CHARACTER(LEN=*), INTENT(IN) :: coupling_config_file INTEGER, INTENT(IN) :: elmer_comm INTEGER, INTENT(IN) :: yac_comm From 1d822a65110e63f8679852de2aa515d219554bf4 Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Mon, 3 Nov 2025 18:17:08 +0100 Subject: [PATCH 19/22] Minor formatting and consistency. 
--- fem/src/SParIterComm.F90 | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/fem/src/SParIterComm.F90 b/fem/src/SParIterComm.F90 index 5d82492741..8daaae476a 100644 --- a/fem/src/SParIterComm.F90 +++ b/fem/src/SParIterComm.F90 @@ -214,10 +214,10 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) INTEGER :: ierr INTEGER :: req, prov - CHARACTER(LEN=*), PARAMETER :: yac_config_file = 'coupling.yaml' - CHARACTER(LEN=*), PARAMETER :: xios_config_file = 'iodef.xml' + CHARACTER(LEN=*), PARAMETER :: xios_config_file = "iodef.xml" + CHARACTER(LEN=*), PARAMETER :: yac_config_file = "coupling.yaml" + - !****************************************************************** ParallelEnv => ParEnv @@ -257,23 +257,23 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) CALL MPI_COMM_RANK( MPI_COMM_WORLD, ParEnv % MyPE, ierr ) #ifdef HAVE_XIOS - WRITE(Message,'(A,A)') "Check for XIOS config file", xios_config_file - INQUIRE(FILE=xios_config_file, EXIST=USE_XIOS) - IF (USE_XIOS) THEN - WRITE(Message,'(A,A)') "Use XIOS with config file", xios_config_file - ELSE - WRITE(Message,'(A)') "XIOS config file not found, not using XIOS" - END IF + WRITE(Message,'(A,A)') "Check for XIOS config file", xios_config_file + INQUIRE(FILE=xios_config_file, EXIST=USE_XIOS) + IF (USE_XIOS) THEN + WRITE(Message,'(A,A)') "Use XIOS with config file", xios_config_file + ELSE + WRITE(Message,'(A)') "XIOS config file not found, not using XIOS" + END IF #endif #ifdef HAVE_YAC - WRITE(Message,'(A,A)') "Check for YAC config file", yac_config_file - INQUIRE(FILE=yac_config_file, EXIST=USE_YAC) - IF (USE_YAC) THEN - WRITE(Message,'(A,A)') "Use YAC with config file", yac_config_file - ELSE - WRITE(Message,'(A)') "YAC config file not found, not using YAC" - END IF + WRITE(Message,'(A,A)') "Check for YAC config file", yac_config_file + INQUIRE(FILE=yac_config_file, EXIST=USE_YAC) + IF (USE_YAC) THEN + WRITE(Message,'(A,A)') "Use YAC with config file", yac_config_file + ELSE + WRITE(Message,'(A)') "YAC config file not found, not using YAC" + END IF #endif ! Use mpi_handshake for comm splitting From 25e3921c2cf9b897ef7ec2c0ffb6d7ac7b12a8e5 Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Mon, 3 Nov 2025 18:40:52 +0100 Subject: [PATCH 20/22] Remove (probably) unnecessary TRIM --- fem/src/elmer_coupling.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fem/src/elmer_coupling.F90 b/fem/src/elmer_coupling.F90 index bcb615979f..7a5cee316c 100644 --- a/fem/src/elmer_coupling.F90 +++ b/fem/src/elmer_coupling.F90 @@ -346,7 +346,7 @@ SUBROUTINE coupling_init(coupling_config_file, elmer_comm, yac_comm) ! read configuration file ! * contains calendar, start- and end-date ! * defines couplings - CALL yac_fread_config_yaml(TRIM(coupling_config_file)) + CALL yac_fread_config_yaml(coupling_config_file) ! define component ! * is collective operation for all processes that initialised YAC From 14e187c682a80c86624cb7fa4d1eb29af7e3f9d5 Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Tue, 4 Nov 2025 13:05:48 +0100 Subject: [PATCH 21/22] Fix WRITE statement. --- fem/src/SParIterComm.F90 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fem/src/SParIterComm.F90 b/fem/src/SParIterComm.F90 index 8daaae476a..7b598c9ab5 100644 --- a/fem/src/SParIterComm.F90 +++ b/fem/src/SParIterComm.F90 @@ -323,9 +323,10 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) ! 
Must HAVE_XIOS and xios_config_file present #ifdef HAVE_XIOS IF (USE_XIOS) THEN - WRITE( Message,'(A)') & + WRITE( Message,'(A,A)') & "Using XIOS with config-file:", & TRIM(xios_config_file) + CALL INFO("SparIterComm",Message,Level=25) CALL SetExecID() CALL xios_initialize(TRIM(ExecID), global_comm=GROUP_COMMS(XIOS_GROUP_IDX)) From 2c9858bdce6875a1c902dfb8f22d885274c28e6f Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Mon, 3 Nov 2025 17:54:39 +0100 Subject: [PATCH 22/22] Check for config files at beginning. * Use PARAMETER for defining config file name at beginning. * Factor checking for config files out since this is less error prone if features are nested. --- fem/src/SParIterComm.F90 | 44 +++++++++++++++++++++++++++----------- fem/src/elmer_coupling.F90 | 2 +- 2 files changed, 33 insertions(+), 13 deletions(-) diff --git a/fem/src/SParIterComm.F90 b/fem/src/SParIterComm.F90 index f7a3949782..7b598c9ab5 100644 --- a/fem/src/SParIterComm.F90 +++ b/fem/src/SParIterComm.F90 @@ -214,9 +214,10 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) INTEGER :: ierr INTEGER :: req, prov - CHARACTER(LEN=1024) :: config_file + CHARACTER(LEN=*), PARAMETER :: xios_config_file = "iodef.xml" + CHARACTER(LEN=*), PARAMETER :: yac_config_file = "coupling.yaml" + - !****************************************************************** ParallelEnv => ParEnv @@ -255,6 +256,26 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) CALL MPI_COMM_SIZE( MPI_COMM_WORLD, ParEnv % PEs, ierr ) CALL MPI_COMM_RANK( MPI_COMM_WORLD, ParEnv % MyPE, ierr ) +#ifdef HAVE_XIOS + WRITE(Message,'(A,A)') "Check for XIOS config file", xios_config_file + INQUIRE(FILE=xios_config_file, EXIST=USE_XIOS) + IF (USE_XIOS) THEN + WRITE(Message,'(A,A)') "Use XIOS with config file", xios_config_file + ELSE + WRITE(Message,'(A)') "XIOS config file not found, not using XIOS" + END IF +#endif + +#ifdef HAVE_YAC + WRITE(Message,'(A,A)') "Check for YAC config file", yac_config_file + INQUIRE(FILE=yac_config_file, EXIST=USE_YAC) + IF (USE_YAC) THEN + WRITE(Message,'(A,A)') "Use YAC with config file", yac_config_file + ELSE + WRITE(Message,'(A)') "YAC config file not found, not using YAC" + END IF +#endif + ! Use mpi_handshake for comm splitting ! TODO how to make sure that mpi_handshake does not conflict with MPI_COMM_SPLIT based on ELMER_COLOUR? @@ -265,7 +286,6 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) GROUP_NAMES(ELMER_GROUP_IDX) = TRIM(ExecID) #ifdef HAVE_XIOS - INQUIRE(FILE="iodef.xml", EXIST=USE_XIOS) ! add XIOS group for comm splitting IF (USE_XIOS) THEN ! Query handshake group label from xios @@ -278,9 +298,6 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) #endif #ifdef HAVE_YAC - ! check config file and set flag USE_YAC - WRITE(config_file,'(A)') "coupling.yaml" - INQUIRE(FILE="coupling.yaml", EXIST=USE_YAC) ! add YAC group for comm splitting IF (USE_YAC) THEN ! Query mpi_handshake group label from coupler @@ -303,11 +320,13 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) ELMER_COMM_WORLD = GROUP_COMMS(ELMER_GROUP_IDX) ! Set ELMER_COMM_WORLD determined through mpi_handshake ! Use XIOS library for IO -! Must have xios and iodef.xml present +! 
Must HAVE_XIOS and xios_config_file present #ifdef HAVE_XIOS - INQUIRE(FILE="iodef.xml", EXIST=USE_XIOS) IF (USE_XIOS) THEN - WRITE(Message,*) "Using XIOS with config-file: iodef.xml" + WRITE( Message,'(A,A)') & + "Using XIOS with config-file:", & + TRIM(xios_config_file) + CALL INFO("SparIterComm",Message,Level=25) CALL SetExecID() CALL xios_initialize(TRIM(ExecID), global_comm=GROUP_COMMS(XIOS_GROUP_IDX)) @@ -332,11 +351,12 @@ FUNCTION ParCommInit( ) RESULT ( ParallelEnv ) ! Use YAC library for coupling ! #ifdef HAVE_YAC - INQUIRE(FILE="coupling.yaml", EXIST=USE_YAC) IF (USE_YAC) THEN - WRITE(Message,'(A,A)') "Using YAC coupler with config-file:",TRIM(config_file) + WRITE(Message,'(A,A)') & + "Using YAC coupler with config-file:", & + TRIM(yac_config_file) CALL INFO("SparIterComm",Message,Level=25) - CALL coupling_init(TRIM(config_file), ELMER_COMM_WORLD, GROUP_COMMS(COUPLER_GROUP_IDX)) + CALL coupling_init(TRIM(yac_config_file), ELMER_COMM_WORLD, GROUP_COMMS(COUPLER_GROUP_IDX)) END IF #endif diff --git a/fem/src/elmer_coupling.F90 b/fem/src/elmer_coupling.F90 index d358deb442..499d82a35f 100644 --- a/fem/src/elmer_coupling.F90 +++ b/fem/src/elmer_coupling.F90 @@ -319,7 +319,7 @@ SUBROUTINE coupling_init(coupling_config_file, elmer_comm, yac_comm) IMPLICIT NONE - CHARACTER(LEN=1024), INTENT(IN) :: coupling_config_file + CHARACTER(LEN=*), INTENT(IN) :: coupling_config_file INTEGER, INTENT(IN) :: elmer_comm INTEGER, INTENT(IN) :: yac_comm
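For reference: the hunk at the top of this series calls the C reader through the name read_grid_c, while elmer_grid.h only declares read_grid. Below is a minimal sketch of an ISO_C_BINDING interface that would connect the two; the dummy-argument names and the assumption that read_grid_c is simply a Fortran binding to the C symbol read_grid are inferred from the call site and are not shown in the patches themselves.

    ! Sketch only, not part of the patch series: a Fortran interface
    ! matching the read_grid() prototype in elmer_grid.h.
    INTERFACE
      SUBROUTINE read_grid_c(grid_dir, comm_rank, comm_size, num_parts, &
          nbr_vertices, nbr_cells, num_vertices_per_cell,               &
          x_vertices, y_vertices, x_cells, y_cells,                     &
          cell_ids, vertex_ids, cell_to_vertex) BIND(C, NAME="read_grid")
        USE, INTRINSIC :: ISO_C_BINDING, ONLY: c_char, c_int, c_ptr
        ! grid_dir must be NUL-terminated by the caller,
        ! as done with TRIM(grid_dir) // c_null_char at the call site
        CHARACTER(KIND=c_char), DIMENSION(*), INTENT(IN) :: grid_dir
        INTEGER(c_int), VALUE :: comm_rank, comm_size, num_parts
        INTEGER(c_int), INTENT(OUT) :: nbr_vertices, nbr_cells
        ! read_grid() allocates these arrays on the C side and returns
        ! raw pointers; they are mapped with C_F_POINTER afterwards
        TYPE(c_ptr), INTENT(OUT) :: num_vertices_per_cell
        TYPE(c_ptr), INTENT(OUT) :: x_vertices, y_vertices, x_cells, y_cells
        TYPE(c_ptr), INTENT(OUT) :: cell_ids, vertex_ids, cell_to_vertex
      END SUBROUTINE read_grid_c
    END INTERFACE

After the call, C_F_POINTER(num_vertices_per_cell_c_ptr, num_vertices_per_cell_c, shape=[nbr_cells]) and its siblings expose the C buffers as Fortran arrays, exactly as the elmer_coupling.F90 hunk shows.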
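A related detail the patches leave implicit: every output array of read_grid() is created with malloc()/realloc() inside elmer_grid.c, so ownership passes to the Fortran caller, and the buffers should eventually be released with the C library's free(), not with DEALLOCATE. A hypothetical binding for that purpose (the name free_c does not appear anywhere in the patches):

    ! Sketch only: release buffers allocated inside read_grid().
    INTERFACE
      SUBROUTINE free_c(ptr) BIND(C, NAME="free")
        USE, INTRINSIC :: ISO_C_BINDING, ONLY: c_ptr
        TYPE(c_ptr), VALUE :: ptr
      END SUBROUTINE free_c
    END INTERFACE

    ! e.g. once the coupling setup no longer needs the raw arrays:
    ! CALL free_c(x_vertices_c_ptr)
    ! CALL free_c(y_vertices_c_ptr)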